Draft plugin architecture and connector docs (#1474)

* docs: outline plugin architecture and connector design

* feat(den-api): add plugin architecture admin and webhook foundation

* refactor(den-api): rename plugin routes and split github connector env

* fix(den-api): scope plugin connector records by organization

---------

Co-authored-by: src-opn <src-opn@users.noreply.github.com>
This commit is contained in:
Source Open
2026-04-17 16:10:23 -07:00
committed by GitHub
parent f19b3fe680
commit 2c4bd553cb
33 changed files with 9613 additions and 0 deletions

View File

@@ -16,6 +16,7 @@ import { registerAuthRoutes } from "./routes/auth/index.js"
import { registerMeRoutes } from "./routes/me/index.js"
import { registerOrgRoutes } from "./routes/org/index.js"
import { registerVersionRoutes } from "./routes/version/index.js"
import { registerWebhookRoutes } from "./routes/webhooks/index.js"
import { registerWorkerRoutes } from "./routes/workers/index.js"
import type { AuthContextVariables } from "./session.js"
import { sessionMiddleware } from "./session.js"
@@ -108,6 +109,7 @@ registerAuthRoutes(app)
registerMeRoutes(app)
registerOrgRoutes(app)
registerVersionRoutes(app)
registerWebhookRoutes(app)
registerWorkerRoutes(app)
app.get(

View File

@@ -13,6 +13,11 @@ const EnvSchema = z.object({
DEN_BETTER_AUTH_TRUSTED_ORIGINS: z.string().optional(),
GITHUB_CLIENT_ID: z.string().optional(),
GITHUB_CLIENT_SECRET: z.string().optional(),
GITHUB_CONNECTOR_APP_ID: z.string().optional(),
GITHUB_CONNECTOR_APP_CLIENT_ID: z.string().optional(),
GITHUB_CONNECTOR_APP_CLIENT_SECRET: z.string().optional(),
GITHUB_CONNECTOR_APP_PRIVATE_KEY: z.string().optional(),
GITHUB_CONNECTOR_APP_WEBHOOK_SECRET: z.string().optional(),
GOOGLE_CLIENT_ID: z.string().optional(),
GOOGLE_CLIENT_SECRET: z.string().optional(),
LOOPS_API_KEY: z.string().optional(),
@@ -158,6 +163,13 @@ export const env = {
clientId: optionalString(parsed.GITHUB_CLIENT_ID),
clientSecret: optionalString(parsed.GITHUB_CLIENT_SECRET),
},
githubConnectorApp: {
appId: optionalString(parsed.GITHUB_CONNECTOR_APP_ID),
clientId: optionalString(parsed.GITHUB_CONNECTOR_APP_CLIENT_ID),
clientSecret: optionalString(parsed.GITHUB_CONNECTOR_APP_CLIENT_SECRET),
privateKey: optionalString(parsed.GITHUB_CONNECTOR_APP_PRIVATE_KEY),
webhookSecret: optionalString(parsed.GITHUB_CONNECTOR_APP_WEBHOOK_SECRET),
},
google: {
clientId: optionalString(parsed.GOOGLE_CLIENT_ID),
clientSecret: optionalString(parsed.GOOGLE_CLIENT_SECRET),

View File

@@ -5,6 +5,7 @@ import { registerOrgCoreRoutes } from "./core.js"
import { registerOrgInvitationRoutes } from "./invitations.js"
import { registerOrgLlmProviderRoutes } from "./llm-providers.js"
import { registerOrgMemberRoutes } from "./members.js"
import { registerPluginArchRoutes } from "./plugin-system/routes.js"
import { registerOrgRoleRoutes } from "./roles.js"
import { registerOrgSkillRoutes } from "./skills.js"
import { registerOrgTeamRoutes } from "./teams.js"
@@ -16,6 +17,7 @@ export function registerOrgRoutes<T extends { Variables: OrgRouteVariables }>(ap
registerOrgInvitationRoutes(app)
registerOrgLlmProviderRoutes(app)
registerOrgMemberRoutes(app)
registerPluginArchRoutes(app)
registerOrgRoleRoutes(app)
registerOrgSkillRoutes(app)
registerOrgTeamRoutes(app)

View File

@@ -0,0 +1,230 @@
import { and, eq, inArray, isNull } from "@openwork-ee/den-db/drizzle"
import {
ConfigObjectAccessGrantTable,
ConfigObjectTable,
ConnectorInstanceAccessGrantTable,
ConnectorInstanceTable,
PluginAccessGrantTable,
PluginConfigObjectTable,
PluginTable,
} from "@openwork-ee/den-db/schema"
import type { MemberTeamSummary, OrganizationContext } from "../../../orgs.js"
import { db } from "../../../db.js"
import { memberHasRole } from "../shared.js"
// Resource families that participate in plugin-architecture access control.
export type PluginArchResourceKind = "config_object" | "connector_instance" | "plugin"
// Ordered roles; "manager" implies "editor" implies "viewer" (see rolePriority below).
export type PluginArchRole = "viewer" | "editor" | "manager"
// Org-level creation capabilities. Currently every capability resolves to the
// same org-admin check (see hasPluginArchCapability).
export type PluginArchCapability = "config_object.create" | "connector_account.create" | "connector_instance.create" | "plugin.create"
// Everything needed to evaluate access for the acting member: their team
// memberships plus the resolved organization context.
export type PluginArchActorContext = {
memberTeams: MemberTeamSummary[]
organizationContext: OrganizationContext
}
// Identifier aliases derived from the org context and the table schemas so
// they stay in sync with the database types.
type MemberId = OrganizationContext["currentMember"]["id"]
type TeamId = MemberTeamSummary["id"]
type ConfigObjectId = typeof ConfigObjectTable.$inferSelect.id
type PluginId = typeof PluginTable.$inferSelect.id
type ConnectorInstanceId = typeof ConnectorInstanceTable.$inferSelect.id
// Minimal grant-row projections: who the grant applies to (direct member,
// team, or org-wide), the granted role, and the soft-revocation timestamp.
type ConfigObjectGrantRow = Pick<typeof ConfigObjectAccessGrantTable.$inferSelect, "orgMembershipId" | "orgWide" | "removedAt" | "role" | "teamId">
type PluginGrantRow = Pick<typeof PluginAccessGrantTable.$inferSelect, "orgMembershipId" | "orgWide" | "removedAt" | "role" | "teamId">
type ConnectorInstanceGrantRow = Pick<typeof ConnectorInstanceAccessGrantTable.$inferSelect, "orgMembershipId" | "orgWide" | "removedAt" | "role" | "teamId">
type GrantRow = ConfigObjectGrantRow | PluginGrantRow | ConnectorInstanceGrantRow
// Discriminated lookup inputs: resourceKind is the tag that correlates the
// resourceId with the correct table.
type PluginResourceLookupInput = {
context: PluginArchActorContext
resourceId: PluginId
resourceKind: "plugin"
}
type ConnectorInstanceResourceLookupInput = {
context: PluginArchActorContext
resourceId: ConnectorInstanceId
resourceKind: "connector_instance"
}
type ConfigObjectResourceLookupInput = {
context: PluginArchActorContext
resourceId: ConfigObjectId
resourceKind: "config_object"
}
type ResourceLookupInput =
| PluginResourceLookupInput
| ConnectorInstanceResourceLookupInput
| ConfigObjectResourceLookupInput
// Lookup input plus the minimum role the caller must hold.
type RequireResourceRoleInput = ResourceLookupInput & { role: PluginArchRole }
/**
 * Error thrown when the acting member lacks a required plugin-architecture
 * permission. It always carries HTTP status 403 and the literal error code
 * "forbidden" so route handlers can serialize it without inspecting the
 * message text.
 */
export class PluginArchAuthorizationError extends Error {
  readonly status: 403
  readonly error: "forbidden"

  constructor(status: 403, error: "forbidden", message: string) {
    super(message)
    this.status = status
    this.error = error
    this.name = "PluginArchAuthorizationError"
  }
}
/** Numeric ranking used to compare roles; larger means stronger. */
const rolePriority: Record<PluginArchRole, number> = {
  viewer: 1,
  editor: 2,
  manager: 3,
}

/** Returns whichever of the two roles is stronger; null-safe on both sides (ties keep `current`). */
function maxRole(current: PluginArchRole | null, candidate: PluginArchRole | null) {
  if (!current) return candidate
  if (!candidate) return current
  if (rolePriority[current] >= rolePriority[candidate]) return current
  return candidate
}

/** True when the acting member is the org owner or holds the admin role. */
export function isPluginArchOrgAdmin(context: PluginArchActorContext) {
  const { currentMember } = context.organizationContext
  if (currentMember.isOwner) return true
  return memberHasRole(currentMember.role, "admin")
}

/**
 * Checks an org-level capability. Capabilities are currently admin-only; the
 * capability argument is accepted (and ignored) for forward compatibility
 * with finer-grained checks.
 */
export function hasPluginArchCapability(context: PluginArchActorContext, _capability: PluginArchCapability) {
  return isPluginArchOrgAdmin(context)
}

/** True when `role` is non-null and at least as strong as `required`. */
function roleSatisfies(role: PluginArchRole | null, required: PluginArchRole) {
  return role ? rolePriority[role] >= rolePriority[required] : false
}

/**
 * Pure resolver: folds raw grant rows into the strongest role that applies to
 * the given member. A grant applies when it is not soft-revoked and is either
 * org-wide, addressed directly to the member, or addressed to one of the
 * member's teams.
 */
export function resolvePluginArchGrantRole(input: {
  grants: GrantRow[]
  memberId: MemberId
  teamIds: TeamId[]
}) {
  const memberTeamIds = new Set(input.teamIds)
  let best: PluginArchRole | null = null
  for (const grant of input.grants) {
    // Soft-revoked grants never contribute.
    if (grant.removedAt) continue
    const viaTeam = grant.teamId ? memberTeamIds.has(grant.teamId) : false
    if (grant.orgWide || grant.orgMembershipId === input.memberId || viaTeam) {
      best = maxRole(best, grant.role)
    }
  }
  return best
}
/**
 * Resolves the strongest applicable role from raw grant rows for the acting
 * member, feeding the member id and team ids out of the actor context into
 * the pure resolver.
 *
 * Fix: the original was declared `async` despite containing no `await`
 * (flagged by @typescript-eslint/require-await), which wrapped a purely
 * synchronous computation in a needless promise. It now returns the role
 * directly; all in-file callers `await` or `return` the result, and `await`
 * on a plain value is a no-op, so callers are unaffected.
 */
function resolveGrantRole(input: {
  grants: GrantRow[]
  context: PluginArchActorContext
}) {
  return resolvePluginArchGrantRole({
    grants: input.grants,
    memberId: input.context.organizationContext.currentMember.id,
    teamIds: input.context.memberTeams.map((team) => team.id),
  })
}
/**
 * Resolves the member's strongest role across a set of plugins by loading
 * every grant row for those plugins and folding them through the context.
 * Returns null when there are no candidate plugins or no applicable grants.
 */
async function resolvePluginRoleForIds(context: PluginArchActorContext, pluginIds: PluginId[]) {
  // Nothing to look up — no plugin-derived role at all.
  if (!pluginIds.length) return null
  // Org admins implicitly hold manager on every plugin; skip the query.
  if (isPluginArchOrgAdmin(context)) return "manager" satisfies PluginArchRole
  const grantRows = await db
    .select({
      role: PluginAccessGrantTable.role,
      orgMembershipId: PluginAccessGrantTable.orgMembershipId,
      teamId: PluginAccessGrantTable.teamId,
      orgWide: PluginAccessGrantTable.orgWide,
      removedAt: PluginAccessGrantTable.removedAt,
    })
    .from(PluginAccessGrantTable)
    .where(inArray(PluginAccessGrantTable.pluginId, pluginIds))
  return resolveGrantRole({ context, grants: grantRows })
}
/**
 * Resolves the acting member's effective role on one resource.
 *
 * Resolution order:
 *   1. Org owners/admins always get "manager".
 *   2. Direct grants on the resource itself (plugin / connector instance /
 *      config object).
 *   3. Config objects only: membership in an accessible plugin grants
 *      read-only ("viewer") visibility as a fallback.
 *
 * Soft-revoked grants are filtered inside resolvePluginArchGrantRole, not in
 * SQL — every grant row for the resource is fetched.
 * Returns the resolved role, or null when the member has no access.
 */
export async function resolvePluginArchResourceRole(input: ResourceLookupInput) {
  if (isPluginArchOrgAdmin(input.context)) {
    return "manager" satisfies PluginArchRole
  }
  if (input.resourceKind === "plugin") {
    // Direct grants on the plugin row.
    const grants = await db
      .select({
        orgMembershipId: PluginAccessGrantTable.orgMembershipId,
        orgWide: PluginAccessGrantTable.orgWide,
        removedAt: PluginAccessGrantTable.removedAt,
        role: PluginAccessGrantTable.role,
        teamId: PluginAccessGrantTable.teamId,
      })
      .from(PluginAccessGrantTable)
      .where(eq(PluginAccessGrantTable.pluginId, input.resourceId))
    return resolveGrantRole({ context: input.context, grants })
  }
  if (input.resourceKind === "connector_instance") {
    // Direct grants on the connector instance row.
    const grants = await db
      .select({
        orgMembershipId: ConnectorInstanceAccessGrantTable.orgMembershipId,
        orgWide: ConnectorInstanceAccessGrantTable.orgWide,
        removedAt: ConnectorInstanceAccessGrantTable.removedAt,
        role: ConnectorInstanceAccessGrantTable.role,
        teamId: ConnectorInstanceAccessGrantTable.teamId,
      })
      .from(ConnectorInstanceAccessGrantTable)
      .where(eq(ConnectorInstanceAccessGrantTable.connectorInstanceId, input.resourceId))
    return resolveGrantRole({ context: input.context, grants })
  }
  // Remaining kind is "config_object": a direct grant, when present, wins
  // outright and short-circuits the plugin-membership fallback below.
  const directGrants = await db
    .select({
      orgMembershipId: ConfigObjectAccessGrantTable.orgMembershipId,
      orgWide: ConfigObjectAccessGrantTable.orgWide,
      removedAt: ConfigObjectAccessGrantTable.removedAt,
      role: ConfigObjectAccessGrantTable.role,
      teamId: ConfigObjectAccessGrantTable.teamId,
    })
    .from(ConfigObjectAccessGrantTable)
    .where(eq(ConfigObjectAccessGrantTable.configObjectId, input.resourceId))
  let resolved = await resolveGrantRole({ context: input.context, grants: directGrants })
  if (resolved) {
    return resolved
  }
  // Fallback: find active (non-removed) plugin memberships for this config
  // object, then the member's best role across those plugins.
  const memberships = await db
    .select({ pluginId: PluginConfigObjectTable.pluginId })
    .from(PluginConfigObjectTable)
    .where(and(eq(PluginConfigObjectTable.configObjectId, input.resourceId), isNull(PluginConfigObjectTable.removedAt)))
  const pluginRole = await resolvePluginRoleForIds(input.context, memberships.map((membership) => membership.pluginId))
  // Any plugin-derived role is deliberately capped at "viewer" here; since
  // `resolved` is always null at this point, maxRole just passes the cap through.
  resolved = maxRole(resolved, pluginRole ? "viewer" : null)
  return resolved
}
/**
 * Asserts the acting member holds the given org-level capability.
 * Resolves to undefined on success; rejects with a 403
 * PluginArchAuthorizationError otherwise.
 */
export async function requirePluginArchCapability(context: PluginArchActorContext, capability: PluginArchCapability) {
  if (!hasPluginArchCapability(context, capability)) {
    throw new PluginArchAuthorizationError(403, "forbidden", `Missing organization capability: ${capability}`)
  }
}
/**
 * Asserts the acting member holds at least `role` on the resource, returning
 * the resolved role on success and throwing a 403
 * PluginArchAuthorizationError otherwise.
 */
export async function requirePluginArchResourceRole(input: {
  context: PluginArchActorContext
  resourceId: ConfigObjectId | ConnectorInstanceId | PluginId
  resourceKind: PluginArchResourceKind
  role: PluginArchRole
}) {
  // The broad parameter type does not correlate resourceKind with resourceId,
  // so narrow it to the discriminated lookup union before dispatching.
  const lookup = input as RequireResourceRoleInput
  const granted = await resolvePluginArchResourceRole(lookup)
  if (!roleSatisfies(granted, input.role)) {
    throw new PluginArchAuthorizationError(
      403,
      "forbidden",
      `Missing ${input.role} access for ${input.resourceKind.replace(/_/g, " ")}.`,
    )
  }
  return granted
}

View File

@@ -0,0 +1,781 @@
import type { z } from "zod"
import {
accessGrantListResponseSchema,
accessGrantMutationResponseSchema,
configObjectAccessGrantParamsSchema,
configObjectCreateSchema,
configObjectCreateVersionSchema,
configObjectDetailResponseSchema,
configObjectListQuerySchema,
configObjectListResponseSchema,
configObjectMutationResponseSchema,
configObjectParamsSchema,
configObjectPluginAttachSchema,
configObjectVersionDetailResponseSchema,
configObjectVersionListQuerySchema,
configObjectVersionListResponseSchema,
configObjectVersionParamsSchema,
connectorAccountCreateSchema,
connectorAccountDetailResponseSchema,
connectorAccountDisconnectSchema,
connectorAccountListQuerySchema,
connectorAccountListResponseSchema,
connectorAccountMutationResponseSchema,
connectorAccountParamsSchema,
connectorAccountRepositoryParamsSchema,
connectorInstanceAccessGrantParamsSchema,
connectorInstanceCreateSchema,
connectorInstanceDetailResponseSchema,
connectorInstanceListQuerySchema,
connectorInstanceListResponseSchema,
connectorInstanceMutationResponseSchema,
connectorInstanceParamsSchema,
connectorInstanceUpdateSchema,
connectorMappingCreateSchema,
connectorMappingListQuerySchema,
connectorMappingListResponseSchema,
connectorMappingMutationResponseSchema,
connectorMappingParamsSchema,
connectorMappingUpdateSchema,
connectorSyncAsyncResponseSchema,
connectorSyncEventDetailResponseSchema,
connectorSyncEventListQuerySchema,
connectorSyncEventListResponseSchema,
connectorSyncEventParamsSchema,
connectorTargetCreateSchema,
connectorTargetDetailResponseSchema,
connectorTargetListQuerySchema,
connectorTargetListResponseSchema,
connectorTargetMutationResponseSchema,
connectorTargetParamsSchema,
connectorTargetUpdateSchema,
githubConnectorAccountCreateSchema,
githubConnectorSetupSchema,
githubRepositoryListQuerySchema,
githubRepositoryListResponseSchema,
githubSetupResponseSchema,
githubValidateTargetResponseSchema,
githubValidateTargetSchema,
githubWebhookAcceptedResponseSchema,
githubWebhookHeadersSchema,
githubWebhookIgnoredResponseSchema,
githubWebhookRawBodySchema,
githubWebhookUnauthorizedResponseSchema,
pluginAccessGrantParamsSchema,
pluginConfigObjectParamsSchema,
pluginCreateSchema,
pluginDetailResponseSchema,
pluginListQuerySchema,
pluginListResponseSchema,
pluginMembershipMutationResponseSchema,
pluginMembershipListResponseSchema,
pluginMembershipWriteSchema,
pluginMutationResponseSchema,
pluginParamsSchema,
pluginUpdateSchema,
resourceAccessGrantWriteSchema,
} from "./schemas.js"
import { orgIdParamSchema } from "../shared.js"
// HTTP verbs used by plugin-architecture endpoints.
type EndpointMethod = "DELETE" | "GET" | "PATCH" | "POST"
// "admin": session-authenticated org routes; "public_webhook": unauthenticated
// ingress (verified by other means — presumably webhook signatures; see the
// github webhook schemas imported above).
type EndpointAudience = "admin" | "public_webhook"
// Grouping tags for documentation/OpenAPI generation.
type EndpointTag = "Config Objects" | "Plugins" | "Connectors" | "GitHub" | "Webhooks"
// Declarative contract for one implemented endpoint: route metadata, optional
// zod schemas for each request part, and the single success response shape.
type EndpointContract = {
audience: EndpointAudience
description: string
method: EndpointMethod
path: string
request?: {
body?: z.ZodTypeAny
headers?: z.ZodTypeAny
params?: z.ZodTypeAny
query?: z.ZodTypeAny
}
response: {
description: string
schema?: z.ZodTypeAny
// 401 presumably covers webhook signature rejection — confirm against the
// webhook contract entries.
status: 200 | 201 | 202 | 204 | 401
}
tag: EndpointTag
}
// Contract stub for endpoints that are specified but intentionally not yet
// implemented; `reason` records why they are deferred.
type DeferredEndpointContract = {
description: string
method: EndpointMethod
path: string
reason: string
tag: EndpointTag
}
// Common prefix for all org-scoped plugin-architecture routes.
const orgBasePath = "/v1/orgs/:orgId"
/**
 * Canonical route path table for the plugin architecture. Keeping every path
 * in one `as const` map lets contracts and route registration share literal
 * types instead of re-spelling strings.
 *
 * NOTE(review): `configObjectLatestVersion` ("/versions/latest") and
 * `configObjectCompareVersions` ("/versions/compare") share a prefix with
 * `configObjectVersion` ("/versions/:versionId"). Whether "latest"/"compare"
 * are captured as a `:versionId` depends on the router's matching rules and
 * registration order — verify the literal routes are registered before the
 * parameterized one.
 */
export const pluginArchRoutePaths = {
// Config objects: CRUD, lifecycle, plugin attachment, access, and versions.
configObjects: `${orgBasePath}/config-objects`,
configObject: `${orgBasePath}/config-objects/:configObjectId`,
configObjectArchive: `${orgBasePath}/config-objects/:configObjectId/archive`,
configObjectDelete: `${orgBasePath}/config-objects/:configObjectId/delete`,
configObjectRestore: `${orgBasePath}/config-objects/:configObjectId/restore`,
configObjectPlugins: `${orgBasePath}/config-objects/:configObjectId/plugins`,
configObjectPlugin: `${orgBasePath}/config-objects/:configObjectId/plugins/:pluginId`,
configObjectAccess: `${orgBasePath}/config-objects/:configObjectId/access`,
configObjectAccessGrant: `${orgBasePath}/config-objects/:configObjectId/access/:grantId`,
configObjectVersions: `${orgBasePath}/config-objects/:configObjectId/versions`,
configObjectVersion: `${orgBasePath}/config-objects/:configObjectId/versions/:versionId`,
configObjectLatestVersion: `${orgBasePath}/config-objects/:configObjectId/versions/latest`,
configObjectCompareVersions: `${orgBasePath}/config-objects/:configObjectId/versions/compare`,
// Top-level collection aliases (purpose not visible here — presumably typed
// views over config objects; confirm against the route handlers).
skills: `${orgBasePath}/skills`,
agents: `${orgBasePath}/agents`,
commands: `${orgBasePath}/commands`,
tools: `${orgBasePath}/tools`,
mcps: `${orgBasePath}/mcps`,
// Plugins: CRUD, lifecycle, membership, resolution preview, and access.
plugins: `${orgBasePath}/plugins`,
plugin: `${orgBasePath}/plugins/:pluginId`,
pluginArchive: `${orgBasePath}/plugins/:pluginId/archive`,
pluginRestore: `${orgBasePath}/plugins/:pluginId/restore`,
pluginConfigObjects: `${orgBasePath}/plugins/:pluginId/config-objects`,
pluginConfigObject: `${orgBasePath}/plugins/:pluginId/config-objects/:configObjectId`,
pluginResolved: `${orgBasePath}/plugins/:pluginId/resolved`,
pluginReleases: `${orgBasePath}/plugins/:pluginId/releases`,
pluginAccess: `${orgBasePath}/plugins/:pluginId/access`,
pluginAccessGrant: `${orgBasePath}/plugins/:pluginId/access/:grantId`,
// Connector accounts (e.g. GitHub App installations) and instances.
connectorAccounts: `${orgBasePath}/connector-accounts`,
connectorAccount: `${orgBasePath}/connector-accounts/:connectorAccountId`,
connectorAccountDisconnect: `${orgBasePath}/connector-accounts/:connectorAccountId/disconnect`,
connectorInstances: `${orgBasePath}/connector-instances`,
connectorInstance: `${orgBasePath}/connector-instances/:connectorInstanceId`,
connectorInstanceArchive: `${orgBasePath}/connector-instances/:connectorInstanceId/archive`,
connectorInstanceDisable: `${orgBasePath}/connector-instances/:connectorInstanceId/disable`,
connectorInstanceEnable: `${orgBasePath}/connector-instances/:connectorInstanceId/enable`,
connectorInstanceAccess: `${orgBasePath}/connector-instances/:connectorInstanceId/access`,
connectorInstanceAccessGrant: `${orgBasePath}/connector-instances/:connectorInstanceId/access/:grantId`,
// Sync targets, field mappings, and sync event history.
connectorTargets: `${orgBasePath}/connector-instances/:connectorInstanceId/targets`,
connectorTarget: `${orgBasePath}/connector-targets/:connectorTargetId`,
connectorTargetResync: `${orgBasePath}/connector-targets/:connectorTargetId/resync`,
connectorTargetMappings: `${orgBasePath}/connector-targets/:connectorTargetId/mappings`,
connectorMapping: `${orgBasePath}/connector-mappings/:connectorMappingId`,
connectorMappingPreview: `${orgBasePath}/connector-mappings/:connectorMappingId/preview`,
connectorSyncEvents: `${orgBasePath}/connector-sync-events`,
connectorSyncEvent: `${orgBasePath}/connector-sync-events/:connectorSyncEventId`,
connectorSyncEventRetry: `${orgBasePath}/connector-sync-events/:connectorSyncEventId/retry`,
// GitHub-specific connector routes.
githubSetup: `${orgBasePath}/connectors/github/setup`,
githubAccounts: `${orgBasePath}/connectors/github/accounts`,
githubAccountRepositories: `${orgBasePath}/connectors/github/accounts/:connectorAccountId/repositories`,
githubValidateTarget: `${orgBasePath}/connectors/github/validate-target`,
// Public webhook ingress — intentionally outside the org-scoped prefix.
githubWebhookIngress: "/api/webhooks/connectors/github",
} as const
export const pluginArchEndpointContracts: Record<string, EndpointContract> = {
listConfigObjects: {
audience: "admin",
description: "List current config object projections with search and connector filters.",
method: "GET",
path: pluginArchRoutePaths.configObjects,
request: { params: orgIdParamSchema, query: configObjectListQuerySchema },
response: { description: "Current config object rows.", schema: configObjectListResponseSchema, status: 200 },
tag: "Config Objects",
},
getConfigObject: {
audience: "admin",
description: "Get one config object with its latest version projection.",
method: "GET",
path: pluginArchRoutePaths.configObject,
request: { params: configObjectParamsSchema },
response: { description: "Current config object detail.", schema: configObjectDetailResponseSchema, status: 200 },
tag: "Config Objects",
},
createConfigObject: {
audience: "admin",
description: "Create a cloud or imported config object and optionally attach it to plugins.",
method: "POST",
path: pluginArchRoutePaths.configObjects,
request: { body: configObjectCreateSchema, params: orgIdParamSchema },
response: { description: "Config object created successfully.", schema: configObjectMutationResponseSchema, status: 201 },
tag: "Config Objects",
},
createConfigObjectVersion: {
audience: "admin",
description: "Create a new immutable version for an existing config object.",
method: "POST",
path: pluginArchRoutePaths.configObjectVersions,
request: { body: configObjectCreateVersionSchema, params: configObjectParamsSchema },
response: { description: "Latest config object detail after version creation.", schema: configObjectMutationResponseSchema, status: 201 },
tag: "Config Objects",
},
archiveConfigObject: {
audience: "admin",
description: "Archive a config object without removing history.",
method: "POST",
path: pluginArchRoutePaths.configObjectArchive,
request: { params: configObjectParamsSchema },
response: { description: "Archived config object detail.", schema: configObjectMutationResponseSchema, status: 200 },
tag: "Config Objects",
},
deleteConfigObject: {
audience: "admin",
description: "Soft-delete a config object while preserving history.",
method: "POST",
path: pluginArchRoutePaths.configObjectDelete,
request: { params: configObjectParamsSchema },
response: { description: "Deleted config object detail.", schema: configObjectMutationResponseSchema, status: 200 },
tag: "Config Objects",
},
restoreConfigObject: {
audience: "admin",
description: "Restore a deleted or archived config object.",
method: "POST",
path: pluginArchRoutePaths.configObjectRestore,
request: { params: configObjectParamsSchema },
response: { description: "Restored config object detail.", schema: configObjectMutationResponseSchema, status: 200 },
tag: "Config Objects",
},
listConfigObjectPlugins: {
audience: "admin",
description: "List the plugins that currently include a config object.",
method: "GET",
path: pluginArchRoutePaths.configObjectPlugins,
request: { params: configObjectParamsSchema },
response: { description: "Plugin memberships for the config object.", schema: pluginMembershipListResponseSchema, status: 200 },
tag: "Config Objects",
},
attachConfigObjectToPlugin: {
audience: "admin",
description: "Attach a config object to a plugin using plugin-scoped write access.",
method: "POST",
path: pluginArchRoutePaths.configObjectPlugins,
request: { body: configObjectPluginAttachSchema, params: configObjectParamsSchema },
response: { description: "Plugin membership created successfully.", schema: pluginMembershipMutationResponseSchema, status: 201 },
tag: "Config Objects",
},
removeConfigObjectFromPlugin: {
audience: "admin",
description: "Remove one active plugin membership from a config object.",
method: "DELETE",
path: pluginArchRoutePaths.configObjectPlugin,
request: { params: configObjectParamsSchema.extend({ pluginId: pluginParamsSchema.shape.pluginId }) },
response: { description: "Plugin membership removed successfully.", status: 204 },
tag: "Config Objects",
},
listConfigObjectAccess: {
audience: "admin",
description: "List direct, team, and org-wide grants for a config object.",
method: "GET",
path: pluginArchRoutePaths.configObjectAccess,
request: { params: configObjectParamsSchema },
response: { description: "Config object access grants.", schema: accessGrantListResponseSchema, status: 200 },
tag: "Config Objects",
},
grantConfigObjectAccess: {
audience: "admin",
description: "Create one direct, team, or org-wide access grant for a config object.",
method: "POST",
path: pluginArchRoutePaths.configObjectAccess,
request: { body: resourceAccessGrantWriteSchema, params: configObjectParamsSchema },
response: { description: "Config object access grant created successfully.", schema: accessGrantMutationResponseSchema, status: 201 },
tag: "Config Objects",
},
revokeConfigObjectAccess: {
audience: "admin",
description: "Soft-revoke one config object access grant.",
method: "DELETE",
path: pluginArchRoutePaths.configObjectAccessGrant,
request: { params: configObjectAccessGrantParamsSchema },
response: { description: "Config object access grant revoked successfully.", status: 204 },
tag: "Config Objects",
},
listConfigObjectVersions: {
audience: "admin",
description: "List immutable versions for a config object.",
method: "GET",
path: pluginArchRoutePaths.configObjectVersions,
request: { params: configObjectParamsSchema, query: configObjectVersionListQuerySchema },
response: { description: "Config object versions.", schema: configObjectVersionListResponseSchema, status: 200 },
tag: "Config Objects",
},
getConfigObjectVersion: {
audience: "admin",
description: "Get one immutable config object version.",
method: "GET",
path: pluginArchRoutePaths.configObjectVersion,
request: { params: configObjectVersionParamsSchema },
response: { description: "Config object version detail.", schema: configObjectVersionDetailResponseSchema, status: 200 },
tag: "Config Objects",
},
getLatestConfigObjectVersion: {
audience: "admin",
description: "Resolve the latest config object version using created_at and id ordering.",
method: "GET",
path: pluginArchRoutePaths.configObjectLatestVersion,
request: { params: configObjectParamsSchema },
response: { description: "Latest config object version detail.", schema: configObjectVersionDetailResponseSchema, status: 200 },
tag: "Config Objects",
},
listPlugins: {
audience: "admin",
description: "List accessible plugins for the organization.",
method: "GET",
path: pluginArchRoutePaths.plugins,
request: { params: orgIdParamSchema, query: pluginListQuerySchema },
response: { description: "Plugin list.", schema: pluginListResponseSchema, status: 200 },
tag: "Plugins",
},
getPlugin: {
audience: "admin",
description: "Get one plugin and its current metadata.",
method: "GET",
path: pluginArchRoutePaths.plugin,
request: { params: pluginParamsSchema },
response: { description: "Plugin detail.", schema: pluginDetailResponseSchema, status: 200 },
tag: "Plugins",
},
createPlugin: {
audience: "admin",
description: "Create a private-by-default plugin.",
method: "POST",
path: pluginArchRoutePaths.plugins,
request: { body: pluginCreateSchema, params: orgIdParamSchema },
response: { description: "Plugin created successfully.", schema: pluginMutationResponseSchema, status: 201 },
tag: "Plugins",
},
updatePlugin: {
audience: "admin",
description: "Patch plugin metadata.",
method: "PATCH",
path: pluginArchRoutePaths.plugin,
request: { body: pluginUpdateSchema, params: pluginParamsSchema },
response: { description: "Plugin updated successfully.", schema: pluginMutationResponseSchema, status: 200 },
tag: "Plugins",
},
archivePlugin: {
audience: "admin",
description: "Archive a plugin without deleting membership history.",
method: "POST",
path: pluginArchRoutePaths.pluginArchive,
request: { params: pluginParamsSchema },
response: { description: "Archived plugin detail.", schema: pluginMutationResponseSchema, status: 200 },
tag: "Plugins",
},
restorePlugin: {
audience: "admin",
description: "Restore an archived or deleted plugin.",
method: "POST",
path: pluginArchRoutePaths.pluginRestore,
request: { params: pluginParamsSchema },
response: { description: "Restored plugin detail.", schema: pluginMutationResponseSchema, status: 200 },
tag: "Plugins",
},
listPluginConfigObjects: {
audience: "admin",
description: "List plugin memberships and the current config object projections they reference.",
method: "GET",
path: pluginArchRoutePaths.pluginConfigObjects,
request: { params: pluginParamsSchema },
response: { description: "Plugin memberships.", schema: pluginMembershipListResponseSchema, status: 200 },
tag: "Plugins",
},
addPluginConfigObject: {
audience: "admin",
description: "Add a config object to a plugin using plugin-scoped write access.",
method: "POST",
path: pluginArchRoutePaths.pluginConfigObjects,
request: { body: pluginMembershipWriteSchema, params: pluginParamsSchema },
response: { description: "Plugin membership created successfully.", schema: pluginMembershipMutationResponseSchema, status: 201 },
tag: "Plugins",
},
removePluginConfigObject: {
audience: "admin",
description: "Remove one config object membership from a plugin.",
method: "DELETE",
path: pluginArchRoutePaths.pluginConfigObject,
request: { params: pluginConfigObjectParamsSchema },
response: { description: "Plugin membership removed successfully.", status: 204 },
tag: "Plugins",
},
getResolvedPlugin: {
audience: "admin",
description: "Preview the resolved latest-version members of a plugin without invoking delivery.",
method: "GET",
path: pluginArchRoutePaths.pluginResolved,
request: { params: pluginParamsSchema },
response: { description: "Resolved plugin membership view.", schema: pluginMembershipListResponseSchema, status: 200 },
tag: "Plugins",
},
listPluginAccess: {
audience: "admin",
description: "List direct, team, and org-wide grants for a plugin.",
method: "GET",
path: pluginArchRoutePaths.pluginAccess,
request: { params: pluginParamsSchema },
response: { description: "Plugin access grants.", schema: accessGrantListResponseSchema, status: 200 },
tag: "Plugins",
},
grantPluginAccess: {
audience: "admin",
description: "Create one direct, team, or org-wide access grant for a plugin.",
method: "POST",
path: pluginArchRoutePaths.pluginAccess,
request: { body: resourceAccessGrantWriteSchema, params: pluginParamsSchema },
response: { description: "Plugin access grant created successfully.", schema: accessGrantMutationResponseSchema, status: 201 },
tag: "Plugins",
},
revokePluginAccess: {
audience: "admin",
description: "Soft-revoke one plugin access grant.",
method: "DELETE",
path: pluginArchRoutePaths.pluginAccessGrant,
request: { params: pluginAccessGrantParamsSchema },
response: { description: "Plugin access grant revoked successfully.", status: 204 },
tag: "Plugins",
},
listConnectorAccounts: {
audience: "admin",
description: "List connector accounts such as GitHub App installations available to the org.",
method: "GET",
path: pluginArchRoutePaths.connectorAccounts,
request: { params: orgIdParamSchema, query: connectorAccountListQuerySchema },
response: { description: "Connector account list.", schema: connectorAccountListResponseSchema, status: 200 },
tag: "Connectors",
},
createConnectorAccount: {
audience: "admin",
description: "Create a reusable connector account record.",
method: "POST",
path: pluginArchRoutePaths.connectorAccounts,
request: { body: connectorAccountCreateSchema, params: orgIdParamSchema },
response: { description: "Connector account created successfully.", schema: connectorAccountMutationResponseSchema, status: 201 },
tag: "Connectors",
},
getConnectorAccount: {
audience: "admin",
description: "Get one connector account record.",
method: "GET",
path: pluginArchRoutePaths.connectorAccount,
request: { params: connectorAccountParamsSchema },
response: { description: "Connector account detail.", schema: connectorAccountDetailResponseSchema, status: 200 },
tag: "Connectors",
},
disconnectConnectorAccount: {
audience: "admin",
description: "Disconnect one connector account without deleting historical sync state.",
method: "POST",
path: pluginArchRoutePaths.connectorAccountDisconnect,
request: { body: connectorAccountDisconnectSchema, params: connectorAccountParamsSchema },
response: { description: "Connector account disconnected successfully.", schema: connectorAccountMutationResponseSchema, status: 200 },
tag: "Connectors",
},
listConnectorInstances: {
audience: "admin",
description: "List configured connector instances for the org.",
method: "GET",
path: pluginArchRoutePaths.connectorInstances,
request: { params: orgIdParamSchema, query: connectorInstanceListQuerySchema },
response: { description: "Connector instance list.", schema: connectorInstanceListResponseSchema, status: 200 },
tag: "Connectors",
},
createConnectorInstance: {
audience: "admin",
description: "Create a connector instance backed by one connector account.",
method: "POST",
path: pluginArchRoutePaths.connectorInstances,
request: { body: connectorInstanceCreateSchema, params: orgIdParamSchema },
response: { description: "Connector instance created successfully.", schema: connectorInstanceMutationResponseSchema, status: 201 },
tag: "Connectors",
},
getConnectorInstance: {
audience: "admin",
description: "Get one connector instance.",
method: "GET",
path: pluginArchRoutePaths.connectorInstance,
request: { params: connectorInstanceParamsSchema },
response: { description: "Connector instance detail.", schema: connectorInstanceDetailResponseSchema, status: 200 },
tag: "Connectors",
},
updateConnectorInstance: {
audience: "admin",
description: "Patch connector instance metadata or config.",
method: "PATCH",
path: pluginArchRoutePaths.connectorInstance,
request: { body: connectorInstanceUpdateSchema, params: connectorInstanceParamsSchema },
response: { description: "Connector instance updated successfully.", schema: connectorInstanceMutationResponseSchema, status: 200 },
tag: "Connectors",
},
archiveConnectorInstance: {
audience: "admin",
description: "Archive a connector instance.",
method: "POST",
path: pluginArchRoutePaths.connectorInstanceArchive,
request: { params: connectorInstanceParamsSchema },
response: { description: "Connector instance archived successfully.", schema: connectorInstanceMutationResponseSchema, status: 200 },
tag: "Connectors",
},
disableConnectorInstance: {
audience: "admin",
description: "Disable sync execution for a connector instance.",
method: "POST",
path: pluginArchRoutePaths.connectorInstanceDisable,
request: { params: connectorInstanceParamsSchema },
response: { description: "Connector instance disabled successfully.", schema: connectorInstanceMutationResponseSchema, status: 200 },
tag: "Connectors",
},
enableConnectorInstance: {
audience: "admin",
description: "Re-enable sync execution for a connector instance.",
method: "POST",
path: pluginArchRoutePaths.connectorInstanceEnable,
request: { params: connectorInstanceParamsSchema },
response: { description: "Connector instance enabled successfully.", schema: connectorInstanceMutationResponseSchema, status: 200 },
tag: "Connectors",
},
listConnectorInstanceAccess: {
audience: "admin",
description: "List direct, team, and org-wide grants for a connector instance.",
method: "GET",
path: pluginArchRoutePaths.connectorInstanceAccess,
request: { params: connectorInstanceParamsSchema },
response: { description: "Connector instance access grants.", schema: accessGrantListResponseSchema, status: 200 },
tag: "Connectors",
},
grantConnectorInstanceAccess: {
audience: "admin",
description: "Create one direct, team, or org-wide access grant for a connector instance.",
method: "POST",
path: pluginArchRoutePaths.connectorInstanceAccess,
request: { body: resourceAccessGrantWriteSchema, params: connectorInstanceParamsSchema },
response: { description: "Connector instance access grant created successfully.", schema: accessGrantMutationResponseSchema, status: 201 },
tag: "Connectors",
},
revokeConnectorInstanceAccess: {
audience: "admin",
description: "Soft-revoke one connector instance access grant.",
method: "DELETE",
path: pluginArchRoutePaths.connectorInstanceAccessGrant,
request: { params: connectorInstanceAccessGrantParamsSchema },
response: { description: "Connector instance access grant revoked successfully.", status: 204 },
tag: "Connectors",
},
listConnectorTargets: {
audience: "admin",
description: "List external targets configured under a connector instance.",
method: "GET",
path: pluginArchRoutePaths.connectorTargets,
request: { params: connectorInstanceParamsSchema, query: connectorTargetListQuerySchema },
response: { description: "Connector target list.", schema: connectorTargetListResponseSchema, status: 200 },
tag: "Connectors",
},
createConnectorTarget: {
audience: "admin",
description: "Create one connector target such as a GitHub repository branch.",
method: "POST",
path: pluginArchRoutePaths.connectorTargets,
request: { body: connectorTargetCreateSchema, params: connectorInstanceParamsSchema },
response: { description: "Connector target created successfully.", schema: connectorTargetMutationResponseSchema, status: 201 },
tag: "Connectors",
},
getConnectorTarget: {
audience: "admin",
description: "Get one connector target.",
method: "GET",
path: pluginArchRoutePaths.connectorTarget,
request: { params: connectorTargetParamsSchema },
response: { description: "Connector target detail.", schema: connectorTargetDetailResponseSchema, status: 200 },
tag: "Connectors",
},
updateConnectorTarget: {
audience: "admin",
description: "Patch one connector target.",
method: "PATCH",
path: pluginArchRoutePaths.connectorTarget,
request: { body: connectorTargetUpdateSchema, params: connectorTargetParamsSchema },
response: { description: "Connector target updated successfully.", schema: connectorTargetMutationResponseSchema, status: 200 },
tag: "Connectors",
},
resyncConnectorTarget: {
audience: "admin",
description: "Queue a manual reconciliation run for one connector target.",
method: "POST",
path: pluginArchRoutePaths.connectorTargetResync,
request: { params: connectorTargetParamsSchema },
response: { description: "Connector target resync queued successfully.", schema: connectorSyncAsyncResponseSchema, status: 202 },
tag: "Connectors",
},
listConnectorMappings: {
audience: "admin",
description: "List mappings configured under a connector target.",
method: "GET",
path: pluginArchRoutePaths.connectorTargetMappings,
request: { params: connectorTargetParamsSchema, query: connectorMappingListQuerySchema },
response: { description: "Connector mapping list.", schema: connectorMappingListResponseSchema, status: 200 },
tag: "Connectors",
},
createConnectorMapping: {
audience: "admin",
description: "Create a path or API mapping for a connector target.",
method: "POST",
path: pluginArchRoutePaths.connectorTargetMappings,
request: { body: connectorMappingCreateSchema, params: connectorTargetParamsSchema },
response: { description: "Connector mapping created successfully.", schema: connectorMappingMutationResponseSchema, status: 201 },
tag: "Connectors",
},
updateConnectorMapping: {
audience: "admin",
description: "Patch one connector mapping.",
method: "PATCH",
path: pluginArchRoutePaths.connectorMapping,
request: { body: connectorMappingUpdateSchema, params: connectorMappingParamsSchema },
response: { description: "Connector mapping updated successfully.", schema: connectorMappingMutationResponseSchema, status: 200 },
tag: "Connectors",
},
deleteConnectorMapping: {
audience: "admin",
description: "Delete one connector mapping.",
method: "DELETE",
path: pluginArchRoutePaths.connectorMapping,
request: { params: connectorMappingParamsSchema },
response: { description: "Connector mapping deleted successfully.", status: 204 },
tag: "Connectors",
},
listConnectorSyncEvents: {
audience: "admin",
description: "List connector sync events for inspection and debugging.",
method: "GET",
path: pluginArchRoutePaths.connectorSyncEvents,
request: { params: orgIdParamSchema, query: connectorSyncEventListQuerySchema },
response: { description: "Connector sync event list.", schema: connectorSyncEventListResponseSchema, status: 200 },
tag: "Connectors",
},
getConnectorSyncEvent: {
audience: "admin",
description: "Get one connector sync event.",
method: "GET",
path: pluginArchRoutePaths.connectorSyncEvent,
request: { params: connectorSyncEventParamsSchema },
response: { description: "Connector sync event detail.", schema: connectorSyncEventDetailResponseSchema, status: 200 },
tag: "Connectors",
},
retryConnectorSyncEvent: {
audience: "admin",
description: "Queue a retry for a failed or partial connector sync event.",
method: "POST",
path: pluginArchRoutePaths.connectorSyncEventRetry,
request: { params: connectorSyncEventParamsSchema },
response: { description: "Connector sync retry queued successfully.", schema: connectorSyncAsyncResponseSchema, status: 202 },
tag: "Connectors",
},
githubSetup: {
audience: "admin",
description: "Create the GitHub connector account, instance, target, and initial mappings in one setup flow.",
method: "POST",
path: pluginArchRoutePaths.githubSetup,
request: { body: githubConnectorSetupSchema, params: orgIdParamSchema },
response: { description: "GitHub connector setup created successfully.", schema: githubSetupResponseSchema, status: 201 },
tag: "GitHub",
},
githubCreateAccount: {
audience: "admin",
description: "Persist a GitHub App installation as a reusable connector account.",
method: "POST",
path: pluginArchRoutePaths.githubAccounts,
request: { body: githubConnectorAccountCreateSchema, params: orgIdParamSchema },
response: { description: "GitHub connector account created successfully.", schema: connectorAccountMutationResponseSchema, status: 201 },
tag: "GitHub",
},
githubListRepositories: {
audience: "admin",
description: "List repositories visible to one GitHub connector account.",
method: "GET",
path: pluginArchRoutePaths.githubAccountRepositories,
request: { params: connectorAccountRepositoryParamsSchema, query: githubRepositoryListQuerySchema },
response: { description: "GitHub repositories visible to the installation.", schema: githubRepositoryListResponseSchema, status: 200 },
tag: "GitHub",
},
githubValidateTarget: {
audience: "admin",
description: "Validate one GitHub repository-branch target before persisting it.",
method: "POST",
path: pluginArchRoutePaths.githubValidateTarget,
request: { body: githubValidateTargetSchema, params: orgIdParamSchema },
response: { description: "GitHub target validation result.", schema: githubValidateTargetResponseSchema, status: 200 },
tag: "GitHub",
},
githubWebhookIngress: {
audience: "public_webhook",
description: "Accept a GitHub App webhook delivery, verify the raw-body signature, and enqueue any relevant sync work.",
method: "POST",
path: pluginArchRoutePaths.githubWebhookIngress,
request: { body: githubWebhookRawBodySchema, headers: githubWebhookHeadersSchema },
response: { description: "Valid webhook accepted or ignored.", schema: githubWebhookAcceptedResponseSchema.or(githubWebhookIgnoredResponseSchema), status: 202 },
tag: "Webhooks",
},
}
/**
 * Endpoints intentionally deferred from the first admin/webhook slice.
 * Each entry records the route it reserves and why it is not built yet.
 */
const TYPE_WRAPPER_REASON = "Shared config-object routes land first; per-type wrappers follow once the core surface is working."
// Builds one deferred entry for a type-specific config-object convenience route;
// all five wrappers share the same method, reason, and tag.
const deferredTypeWrapper = (noun: string, path: string): DeferredEndpointContract => ({
  description: `Type-specific convenience endpoints for ${noun}.`,
  method: "GET",
  path,
  reason: TYPE_WRAPPER_REASON,
  tag: "Config Objects",
})
export const deferredPluginArchEndpointContracts: DeferredEndpointContract[] = [
  {
    description: "Compare two config object versions.",
    method: "GET",
    path: pluginArchRoutePaths.configObjectCompareVersions,
    reason: "Diff semantics can wait until immutable version storage exists.",
    tag: "Config Objects",
  },
  deferredTypeWrapper("skills", pluginArchRoutePaths.skills),
  deferredTypeWrapper("agents", pluginArchRoutePaths.agents),
  deferredTypeWrapper("commands", pluginArchRoutePaths.commands),
  deferredTypeWrapper("tools", pluginArchRoutePaths.tools),
  deferredTypeWrapper("MCPs", pluginArchRoutePaths.mcps),
  {
    description: "Create and list plugin releases.",
    method: "POST",
    path: pluginArchRoutePaths.pluginReleases,
    reason: "Delivery and release snapshots stay deferred until the admin and webhook slice is live.",
    tag: "Plugins",
  },
  {
    description: "Preview one connector mapping against remote source data.",
    method: "POST",
    path: pluginArchRoutePaths.connectorMappingPreview,
    reason: "Mapping preview depends on the later reconciliation engine and should not block the first admin slice.",
    tag: "Connectors",
  },
]
// High-level pointer for reviewers: where each layer of the plugin
// architecture is implemented, and which surfaces are explicitly out of
// scope for this slice (they stay contract-only for now).
export const pluginArchContractSummary = {
  implementationHome: {
    adminApi: "ee/apps/den-api/src/routes/org",
    persistence: "ee/packages/den-db/src/schema",
    webhookIngress: "ee/apps/den-api/src/routes",
  },
  outOfScope: [
    "plugin delivery/install endpoints",
    "plugin release snapshot implementation",
    "type-specific convenience wrappers",
  ],
} as const

View File

@@ -0,0 +1,5 @@
export * from "./contracts.js"
export * from "./access.js"
export * from "./routes.js"
export * from "./schemas.js"
export * from "./store.js"

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,653 @@
import {
accessRoleValues,
configObjectCreatedViaValues,
configObjectSourceModeValues,
configObjectStatusValues,
configObjectTypeValues,
connectorAccountStatusValues,
connectorInstanceStatusValues,
connectorMappingKindValues,
connectorSyncEventTypeValues,
connectorSyncStatusValues,
connectorTargetKindValues,
connectorTypeValues,
membershipSourceValues,
pluginStatusValues,
} from "@openwork-ee/den-db/schema"
import { z } from "zod"
import { denTypeIdSchema } from "../../../openapi.js"
import { idParamSchema, orgIdParamSchema } from "../shared.js"
// ---------------------------------------------------------------------------
// Shared low-level field schemas
// ---------------------------------------------------------------------------
// Opaque pagination cursor token.
const cursorSchema = z.string().trim().min(1).max(255)
// Arbitrary JSON object; unknown keys are preserved (passthrough).
const jsonObjectSchema = z.object({}).passthrough()
// Non-empty raw source text (e.g. a file body supplied by the caller).
const rawSourceTextSchema = z.string().trim().min(1)
// Non-empty string that may also be explicitly null.
const nullableStringSchema = z.string().trim().min(1).nullable()
// ISO-8601 timestamp with timezone offset, or null.
const nullableTimestampSchema = z.string().datetime({ offset: true }).nullable()
// Query-string boolean: only the literal strings "true"/"false" are accepted,
// then transformed to a real boolean.
const queryBooleanSchema = z.enum(["true", "false"]).transform((value) => value === "true")
// GitHub webhook event names this API handles; other events are ignored.
export const githubWebhookEventValues = ["push", "installation", "installation_repositories", "repository"] as const
// ---------------------------------------------------------------------------
// Typed-id schemas: one den type-id schema per plugin-architecture entity.
// ---------------------------------------------------------------------------
export const configObjectIdSchema = denTypeIdSchema("configObject")
export const configObjectVersionIdSchema = denTypeIdSchema("configObjectVersion")
export const configObjectAccessGrantIdSchema = denTypeIdSchema("configObjectAccessGrant")
export const pluginIdSchema = denTypeIdSchema("plugin")
export const pluginConfigObjectIdSchema = denTypeIdSchema("pluginConfigObject")
export const pluginAccessGrantIdSchema = denTypeIdSchema("pluginAccessGrant")
export const connectorAccountIdSchema = denTypeIdSchema("connectorAccount")
export const connectorInstanceIdSchema = denTypeIdSchema("connectorInstance")
export const connectorInstanceAccessGrantIdSchema = denTypeIdSchema("connectorInstanceAccessGrant")
export const connectorTargetIdSchema = denTypeIdSchema("connectorTarget")
export const connectorMappingIdSchema = denTypeIdSchema("connectorMapping")
export const connectorSyncEventIdSchema = denTypeIdSchema("connectorSyncEvent")
export const connectorSourceBindingIdSchema = denTypeIdSchema("connectorSourceBinding")
export const connectorSourceTombstoneIdSchema = denTypeIdSchema("connectorSourceTombstone")
export const memberIdSchema = denTypeIdSchema("member")
export const teamIdSchema = denTypeIdSchema("team")
// ---------------------------------------------------------------------------
// Enum schemas derived from the den-db value lists, so API validation and
// database constraints cannot drift apart.
// ---------------------------------------------------------------------------
export const configObjectTypeSchema = z.enum(configObjectTypeValues)
export const configObjectSourceModeSchema = z.enum(configObjectSourceModeValues)
export const configObjectCreatedViaSchema = z.enum(configObjectCreatedViaValues)
export const configObjectStatusSchema = z.enum(configObjectStatusValues)
export const pluginStatusSchema = z.enum(pluginStatusValues)
export const membershipSourceSchema = z.enum(membershipSourceValues)
export const accessRoleSchema = z.enum(accessRoleValues)
export const connectorTypeSchema = z.enum(connectorTypeValues)
export const connectorAccountStatusSchema = z.enum(connectorAccountStatusValues)
export const connectorInstanceStatusSchema = z.enum(connectorInstanceStatusValues)
export const connectorTargetKindSchema = z.enum(connectorTargetKindValues)
export const connectorMappingKindSchema = z.enum(connectorMappingKindValues)
export const connectorSyncStatusSchema = z.enum(connectorSyncStatusValues)
export const connectorSyncEventTypeSchema = z.enum(connectorSyncEventTypeValues)
export const githubWebhookEventSchema = z.enum(githubWebhookEventValues)
// Free-text search filter shared by every list endpoint below; zod schemas
// are immutable, so one instance can safely back all of them.
const listSearchTermSchema = z.string().trim().min(1).max(255).optional()
// Cursor-pagination envelope common to all plugin-architecture list routes.
export const pluginArchPaginationQuerySchema = z.object({
  cursor: cursorSchema.optional(),
  limit: z.coerce.number().int().min(1).max(100).optional(),
})
export const configObjectListQuerySchema = pluginArchPaginationQuerySchema.extend({
  type: configObjectTypeSchema.optional(),
  status: configObjectStatusSchema.optional(),
  sourceMode: configObjectSourceModeSchema.optional(),
  pluginId: pluginIdSchema.optional(),
  connectorInstanceId: connectorInstanceIdSchema.optional(),
  includeDeleted: queryBooleanSchema.optional(),
  q: listSearchTermSchema,
})
export const configObjectVersionListQuerySchema = pluginArchPaginationQuerySchema.extend({
  includeDeleted: queryBooleanSchema.optional(),
})
export const pluginListQuerySchema = pluginArchPaginationQuerySchema.extend({
  status: pluginStatusSchema.optional(),
  q: listSearchTermSchema,
})
export const connectorAccountListQuerySchema = pluginArchPaginationQuerySchema.extend({
  connectorType: connectorTypeSchema.optional(),
  status: connectorAccountStatusSchema.optional(),
  q: listSearchTermSchema,
})
export const connectorInstanceListQuerySchema = pluginArchPaginationQuerySchema.extend({
  connectorAccountId: connectorAccountIdSchema.optional(),
  connectorType: connectorTypeSchema.optional(),
  pluginId: pluginIdSchema.optional(),
  status: connectorInstanceStatusSchema.optional(),
  q: listSearchTermSchema,
})
export const connectorTargetListQuerySchema = pluginArchPaginationQuerySchema.extend({
  targetKind: connectorTargetKindSchema.optional(),
  q: listSearchTermSchema,
})
export const connectorMappingListQuerySchema = pluginArchPaginationQuerySchema.extend({
  mappingKind: connectorMappingKindSchema.optional(),
  objectType: configObjectTypeSchema.optional(),
  pluginId: pluginIdSchema.optional(),
  q: listSearchTermSchema,
})
export const connectorSyncEventListQuerySchema = pluginArchPaginationQuerySchema.extend({
  connectorInstanceId: connectorInstanceIdSchema.optional(),
  connectorTargetId: connectorTargetIdSchema.optional(),
  eventType: connectorSyncEventTypeSchema.optional(),
  status: connectorSyncStatusSchema.optional(),
  q: listSearchTermSchema,
})
export const githubRepositoryListQuerySchema = pluginArchPaginationQuerySchema.extend({
  q: listSearchTermSchema,
})
// Route-parameter schemas. Every entity route nests under an org, so each
// schema extends orgIdParamSchema with the entity's typed-id path param;
// child resources extend their parent's params in turn.
export const configObjectParamsSchema = orgIdParamSchema.extend(idParamSchema("configObjectId", "configObject").shape)
export const configObjectVersionParamsSchema = configObjectParamsSchema.extend(idParamSchema("versionId", "configObjectVersion").shape)
export const configObjectAccessGrantParamsSchema = configObjectParamsSchema.extend(idParamSchema("grantId", "configObjectAccessGrant").shape)
export const pluginParamsSchema = orgIdParamSchema.extend(idParamSchema("pluginId", "plugin").shape)
export const pluginConfigObjectParamsSchema = pluginParamsSchema.extend(idParamSchema("configObjectId", "configObject").shape)
export const pluginAccessGrantParamsSchema = pluginParamsSchema.extend(idParamSchema("grantId", "pluginAccessGrant").shape)
export const connectorAccountParamsSchema = orgIdParamSchema.extend(idParamSchema("connectorAccountId", "connectorAccount").shape)
export const connectorInstanceParamsSchema = orgIdParamSchema.extend(idParamSchema("connectorInstanceId", "connectorInstance").shape)
export const connectorInstanceAccessGrantParamsSchema = connectorInstanceParamsSchema.extend(idParamSchema("grantId", "connectorInstanceAccessGrant").shape)
export const connectorTargetParamsSchema = orgIdParamSchema.extend(idParamSchema("connectorTargetId", "connectorTarget").shape)
export const connectorMappingParamsSchema = orgIdParamSchema.extend(idParamSchema("connectorMappingId", "connectorMapping").shape)
export const connectorSyncEventParamsSchema = orgIdParamSchema.extend(idParamSchema("connectorSyncEventId", "connectorSyncEvent").shape)
// The repository-listing route reuses the connector-account params unchanged.
export const connectorAccountRepositoryParamsSchema = connectorAccountParamsSchema
// Content input for a config-object version. Callers may send the raw source
// text, an already-normalized JSON payload, or both; sending neither is
// rejected by the refinement below.
export const configObjectInputSchema = z.object({
  rawSourceText: rawSourceTextSchema.optional(),
  normalizedPayloadJson: jsonObjectSchema.optional(),
  parserMode: z.string().trim().min(1).max(100).optional(),
  schemaVersion: z.string().trim().min(1).max(100).optional(),
  metadata: jsonObjectSchema.optional(),
}).superRefine((value, ctx) => {
  // At least one content representation is required. (rawSourceText is
  // min(1) after trim, so falsiness here can only mean "absent".)
  if (!value.rawSourceText && !value.normalizedPayloadJson) {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "Provide either rawSourceText or normalizedPayloadJson.",
      path: ["rawSourceText"],
    })
  }
})
// Create a config object together with its first version's content;
// optionally attach it to up to 100 plugins in the same request.
export const configObjectCreateSchema = z.object({
  type: configObjectTypeSchema,
  sourceMode: configObjectSourceModeSchema,
  pluginIds: z.array(pluginIdSchema).max(100).optional(),
  input: configObjectInputSchema,
})
// Append a new version to an existing config object.
export const configObjectCreateVersionSchema = z.object({
  input: configObjectInputSchema,
  reason: z.string().trim().min(1).max(255).optional(),
})
// Attach an existing config object to a plugin (from the object's side).
export const configObjectPluginAttachSchema = z.object({
  pluginId: pluginIdSchema,
  membershipSource: membershipSourceSchema.optional(),
})
// Write payload for one access grant. Exactly one subject must be chosen:
// a single member, a team, or the whole organization (orgWide=true).
export const resourceAccessGrantWriteSchema = z.object({
  orgMembershipId: memberIdSchema.optional(),
  teamId: teamIdSchema.optional(),
  orgWide: z.boolean().optional().default(false),
  role: accessRoleSchema,
}).superRefine((value, ctx) => {
  // Both id fields are non-empty strings when present and orgWide is a
  // boolean, so truthiness counts exactly the selectors that were set.
  const selectedSubjects = [value.orgMembershipId, value.teamId, value.orgWide].filter(Boolean).length
  if (selectedSubjects !== 1) {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "Provide exactly one of orgMembershipId, teamId, or orgWide=true.",
      path: ["orgMembershipId"],
    })
  }
})
// Create payload for a plugin; description may be given as explicit null.
export const pluginCreateSchema = z.object({
  name: z.string().trim().min(1).max(255),
  description: nullableStringSchema.optional(),
})
// Patch payload for a plugin. An entirely empty patch is rejected so a PATCH
// request cannot silently do nothing; description=null clears the field.
export const pluginUpdateSchema = z.object({
  name: z.string().trim().min(1).max(255).optional(),
  description: nullableStringSchema.optional(),
}).superRefine((value, ctx) => {
  // Generic emptiness check over the parsed (stripped) object: stays correct
  // when new optional fields are added later, unlike enumerating each key.
  if (Object.values(value).every((field) => field === undefined)) {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "Provide at least one field to update.",
      path: ["name"],
    })
  }
})
// Attach a config object to a plugin (from the plugin's side).
export const pluginMembershipWriteSchema = z.object({
  configObjectId: configObjectIdSchema,
  membershipSource: membershipSourceSchema.optional(),
})
// Manually register an external account as a reusable connector account.
export const connectorAccountCreateSchema = z.object({
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255),
  externalAccountRef: z.string().trim().min(1).max(255).nullable().optional(),
  displayName: z.string().trim().min(1).max(255),
  metadata: jsonObjectSchema.optional(),
})
// Disconnect payload. NOTE(review): the root-level .optional() means the
// request body itself may be omitted entirely — confirm the route handler
// expects `undefined` as well as `{}`.
export const connectorAccountDisconnectSchema = z.object({
  reason: z.string().trim().min(1).max(255).optional(),
}).optional()
// Create a connector instance backed by one existing connector account.
export const connectorInstanceCreateSchema = z.object({
  connectorAccountId: connectorAccountIdSchema,
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255).nullable().optional(),
  name: z.string().trim().min(1).max(255),
  config: jsonObjectSchema.optional(),
})
// Patch payload for a connector instance. An entirely empty patch is
// rejected; remoteId=null clears the field.
export const connectorInstanceUpdateSchema = z.object({
  remoteId: z.string().trim().min(1).max(255).nullable().optional(),
  name: z.string().trim().min(1).max(255).optional(),
  status: connectorInstanceStatusSchema.optional(),
  config: jsonObjectSchema.optional(),
}).superRefine((value, ctx) => {
  // Generic emptiness check over the parsed (stripped) object: stays correct
  // when new optional fields are added later, unlike enumerating each key.
  if (Object.values(value).every((field) => field === undefined)) {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "Provide at least one field to update.",
      path: ["name"],
    })
  }
})
// Create one external target (e.g. a repository branch) under an instance.
// Note config is required here, unlike the other create schemas.
export const connectorTargetCreateSchema = z.object({
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255),
  targetKind: connectorTargetKindSchema,
  externalTargetRef: z.string().trim().min(1).max(255).nullable().optional(),
  config: jsonObjectSchema,
})
// Patch payload for a connector target. An entirely empty patch is rejected;
// externalTargetRef=null clears the field.
export const connectorTargetUpdateSchema = z.object({
  remoteId: z.string().trim().min(1).max(255).optional(),
  externalTargetRef: z.string().trim().min(1).max(255).nullable().optional(),
  config: jsonObjectSchema.optional(),
}).superRefine((value, ctx) => {
  // Generic emptiness check over the parsed (stripped) object: stays correct
  // when new optional fields are added later, unlike enumerating each key.
  if (Object.values(value).every((field) => field === undefined)) {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "Provide at least one field to update.",
      path: ["remoteId"],
    })
  }
})
// Create a mapping under a connector target: a selector that routes matched
// source entries to one config-object type, optionally auto-adding them to a
// plugin (autoAddToPlugin defaults to false).
export const connectorMappingCreateSchema = z.object({
  mappingKind: connectorMappingKindSchema,
  selector: z.string().trim().min(1).max(255),
  objectType: configObjectTypeSchema,
  pluginId: pluginIdSchema.nullable().optional(),
  autoAddToPlugin: z.boolean().default(false),
  config: jsonObjectSchema.optional(),
})
// Patch payload for a connector mapping. An entirely empty patch is rejected;
// pluginId=null detaches the mapping from its plugin.
export const connectorMappingUpdateSchema = z.object({
  selector: z.string().trim().min(1).max(255).optional(),
  objectType: configObjectTypeSchema.optional(),
  pluginId: pluginIdSchema.nullable().optional(),
  autoAddToPlugin: z.boolean().optional(),
  config: jsonObjectSchema.optional(),
}).superRefine((value, ctx) => {
  // Generic emptiness check over the parsed (stripped) object: stays correct
  // when new optional fields are added later, unlike the brittle alternative
  // of enumerating all five keys by hand.
  if (Object.values(value).every((field) => field === undefined)) {
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "Provide at least one field to update.",
      path: ["selector"],
    })
  }
})
// One-shot GitHub setup payload: creates the connector account, instance,
// repository/branch target, and initial mappings in a single flow.
// NOTE(review): connectorAccountId presumably reuses an existing account
// instead of creating one — confirm against the route handler.
export const githubConnectorSetupSchema = z.object({
  installationId: z.number().int().positive(),
  connectorAccountId: connectorAccountIdSchema.optional(),
  connectorInstanceName: z.string().trim().min(1).max(255),
  repositoryId: z.number().int().positive(),
  repositoryFullName: z.string().trim().min(1).max(255),
  branch: z.string().trim().min(1).max(255),
  ref: z.string().trim().min(1).max(255),
  mappings: z.array(connectorMappingCreateSchema).max(100).default([]),
})
// Persist a GitHub App installation as a reusable connector account.
export const githubConnectorAccountCreateSchema = z.object({
  installationId: z.number().int().positive(),
  accountLogin: z.string().trim().min(1).max(255),
  accountType: z.enum(["Organization", "User"]),
  displayName: z.string().trim().min(1).max(255),
})
// Validate one repository/branch target before persisting it.
export const githubValidateTargetSchema = z.object({
  installationId: z.number().int().positive(),
  repositoryId: z.number().int().positive(),
  repositoryFullName: z.string().trim().min(1).max(255),
  branch: z.string().trim().min(1).max(255),
  ref: z.string().trim().min(1).max(255),
})
// One access-grant row as returned by any grant-list endpoint. Exactly one
// of orgMembershipId / teamId / orgWide identifies the subject; the id union
// covers the three resource kinds that carry grants.
export const accessGrantSchema = z.object({
  id: z.union([configObjectAccessGrantIdSchema, pluginAccessGrantIdSchema, connectorInstanceAccessGrantIdSchema]),
  orgMembershipId: memberIdSchema.nullable(),
  teamId: teamIdSchema.nullable(),
  orgWide: z.boolean(),
  role: accessRoleSchema,
  createdByOrgMembershipId: memberIdSchema,
  createdAt: z.string().datetime({ offset: true }),
  removedAt: nullableTimestampSchema,
}).meta({ ref: "PluginArchAccessGrant" })
// One revision of a config object, including where it came from (manual edit
// vs connector sync) and the source revision it was synced at, if any.
export const configObjectVersionSchema = z.object({
  id: configObjectVersionIdSchema,
  configObjectId: configObjectIdSchema,
  schemaVersion: z.string().trim().min(1).max(100).nullable(),
  normalizedPayloadJson: jsonObjectSchema.nullable(),
  rawSourceText: z.string().nullable(),
  createdVia: configObjectCreatedViaSchema,
  createdByOrgMembershipId: memberIdSchema.nullable(),
  connectorSyncEventId: connectorSyncEventIdSchema.nullable(),
  sourceRevisionRef: z.string().trim().min(1).max(255).nullable(),
  isDeletedVersion: z.boolean(),
  createdAt: z.string().datetime({ offset: true }),
}).meta({ ref: "PluginArchConfigObjectVersion" })
// A config object plus its latest version; connectorInstanceId is set when
// the object is managed by a connector sync.
export const configObjectSchema = z.object({
  id: configObjectIdSchema,
  organizationId: denTypeIdSchema("organization"),
  objectType: configObjectTypeSchema,
  sourceMode: configObjectSourceModeSchema,
  title: z.string().trim().min(1).max(255),
  description: nullableStringSchema,
  searchText: z.string().trim().min(1).max(65535).nullable(),
  currentFileName: z.string().trim().min(1).max(255).nullable(),
  currentFileExtension: z.string().trim().min(1).max(32).nullable(),
  currentRelativePath: z.string().trim().min(1).max(255).nullable(),
  status: configObjectStatusSchema,
  createdByOrgMembershipId: memberIdSchema,
  connectorInstanceId: connectorInstanceIdSchema.nullable(),
  createdAt: z.string().datetime({ offset: true }),
  updatedAt: z.string().datetime({ offset: true }),
  deletedAt: nullableTimestampSchema,
  latestVersion: configObjectVersionSchema.nullable(),
}).meta({ ref: "PluginArchConfigObject" })
// Join row linking a config object into a plugin; the embedded configObject
// is included only by endpoints that expand it.
export const pluginMembershipSchema = z.object({
  id: pluginConfigObjectIdSchema,
  pluginId: pluginIdSchema,
  configObjectId: configObjectIdSchema,
  membershipSource: membershipSourceSchema,
  connectorMappingId: connectorMappingIdSchema.nullable(),
  createdByOrgMembershipId: memberIdSchema.nullable(),
  createdAt: z.string().datetime({ offset: true }),
  removedAt: nullableTimestampSchema,
  configObject: configObjectSchema.optional(),
}).meta({ ref: "PluginArchPluginMembership" })
// A plugin (named bundle of config objects); memberCount is included only by
// endpoints that compute it.
export const pluginSchema = z.object({
  id: pluginIdSchema,
  organizationId: denTypeIdSchema("organization"),
  name: z.string().trim().min(1).max(255),
  description: nullableStringSchema,
  status: pluginStatusSchema,
  createdByOrgMembershipId: memberIdSchema,
  createdAt: z.string().datetime({ offset: true }),
  updatedAt: z.string().datetime({ offset: true }),
  deletedAt: nullableTimestampSchema,
  memberCount: z.number().int().nonnegative().optional(),
}).meta({ ref: "PluginArchPlugin" })
// A connector account: one authenticated link to an external provider,
// scoped to the owning organization and reusable by multiple instances.
export const connectorAccountSchema = z.object({
  id: connectorAccountIdSchema,
  organizationId: denTypeIdSchema("organization"),
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255),
  externalAccountRef: z.string().trim().min(1).max(255).nullable(),
  displayName: z.string().trim().min(1).max(255),
  status: connectorAccountStatusSchema,
  createdByOrgMembershipId: memberIdSchema,
  createdAt: z.string().datetime({ offset: true }),
  updatedAt: z.string().datetime({ offset: true }),
  metadata: jsonObjectSchema.optional(),
}).meta({ ref: "PluginArchConnectorAccount" })
// A connector instance: one configured sync pipeline backed by an account,
// carrying the last-sync bookkeeping (timestamp, status, cursor).
export const connectorInstanceSchema = z.object({
  id: connectorInstanceIdSchema,
  organizationId: denTypeIdSchema("organization"),
  connectorAccountId: connectorAccountIdSchema,
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255).nullable(),
  name: z.string().trim().min(1).max(255),
  status: connectorInstanceStatusSchema,
  instanceConfigJson: jsonObjectSchema.nullable(),
  lastSyncedAt: nullableTimestampSchema,
  lastSyncStatus: connectorSyncStatusSchema.nullable(),
  lastSyncCursor: z.string().trim().min(1).max(255).nullable(),
  createdByOrgMembershipId: memberIdSchema,
  createdAt: z.string().datetime({ offset: true }),
  updatedAt: z.string().datetime({ offset: true }),
}).meta({ ref: "PluginArchConnectorInstance" })
// One external target under an instance (e.g. a repository branch).
export const connectorTargetSchema = z.object({
  id: connectorTargetIdSchema,
  connectorInstanceId: connectorInstanceIdSchema,
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255),
  targetKind: connectorTargetKindSchema,
  externalTargetRef: z.string().trim().min(1).max(255).nullable(),
  targetConfigJson: jsonObjectSchema,
  createdAt: z.string().datetime({ offset: true }),
  updatedAt: z.string().datetime({ offset: true }),
}).meta({ ref: "PluginArchConnectorTarget" })
// One mapping rule under a target, routing matched entries to an object type
// and optionally auto-adding them to a plugin.
export const connectorMappingSchema = z.object({
  id: connectorMappingIdSchema,
  connectorInstanceId: connectorInstanceIdSchema,
  connectorTargetId: connectorTargetIdSchema,
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255).nullable(),
  mappingKind: connectorMappingKindSchema,
  selector: z.string().trim().min(1).max(255),
  objectType: configObjectTypeSchema,
  pluginId: pluginIdSchema.nullable(),
  autoAddToPlugin: z.boolean(),
  mappingConfigJson: jsonObjectSchema.nullable(),
  createdAt: z.string().datetime({ offset: true }),
  updatedAt: z.string().datetime({ offset: true }),
}).meta({ ref: "PluginArchConnectorMapping" })
// Aggregate counts for one sync run; passthrough keeps any extra
// connector-specific keys the sync engine records.
export const connectorSyncSummarySchema = z.object({
  createdCount: z.number().int().nonnegative().optional(),
  updatedCount: z.number().int().nonnegative().optional(),
  deletedCount: z.number().int().nonnegative().optional(),
  skippedCount: z.number().int().nonnegative().optional(),
  failedCount: z.number().int().nonnegative().optional(),
  failures: z.array(jsonObjectSchema).optional(),
}).passthrough().meta({ ref: "PluginArchConnectorSyncSummary" })
export const connectorSyncEventSchema = z.object({
id: connectorSyncEventIdSchema,
connectorInstanceId: connectorInstanceIdSchema,
connectorTargetId: connectorTargetIdSchema.nullable(),
connectorType: connectorTypeSchema,
remoteId: z.string().trim().min(1).max(255).nullable(),
eventType: connectorSyncEventTypeSchema,
externalEventRef: z.string().trim().min(1).max(255).nullable(),
sourceRevisionRef: z.string().trim().min(1).max(255).nullable(),
status: connectorSyncStatusSchema,
summaryJson: connectorSyncSummarySchema.nullable(),
startedAt: z.string().datetime({ offset: true }),
completedAt: nullableTimestampSchema,
}).meta({ ref: "PluginArchConnectorSyncEvent" })
// Links a config object to the exact remote location it was ingested from,
// so re-syncs can update/delete the same object instead of creating duplicates.
export const connectorSourceBindingSchema = z.object({
  id: connectorSourceBindingIdSchema,
  configObjectId: configObjectIdSchema,
  connectorInstanceId: connectorInstanceIdSchema,
  connectorTargetId: connectorTargetIdSchema,
  connectorMappingId: connectorMappingIdSchema,
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255).nullable(),
  // Fix: aligned with the `external_locator` varchar(2048) column in the
  // migration — the previous max(255) rejected paths the database can store.
  externalLocator: z.string().trim().min(1).max(2048),
  externalStableRef: z.string().trim().min(1).max(255).nullable(),
  lastSeenSourceRevisionRef: z.string().trim().min(1).max(255).nullable(),
  status: configObjectStatusSchema,
  createdAt: z.string().datetime({ offset: true }),
  updatedAt: z.string().datetime({ offset: true }),
  deletedAt: nullableTimestampSchema,
}).meta({ ref: "PluginArchConnectorSourceBinding" })
// Records a remote object deleted upstream, keeping enough provenance
// (locator, former object id, sync event) to suppress accidental re-creation.
export const connectorSourceTombstoneSchema = z.object({
  id: connectorSourceTombstoneIdSchema,
  connectorInstanceId: connectorInstanceIdSchema,
  connectorTargetId: connectorTargetIdSchema,
  connectorMappingId: connectorMappingIdSchema,
  connectorType: connectorTypeSchema,
  remoteId: z.string().trim().min(1).max(255).nullable(),
  // Fix: aligned with the `external_locator` varchar(2048) column in the
  // migration — the previous max(255) rejected paths the database can store.
  externalLocator: z.string().trim().min(1).max(2048),
  formerConfigObjectId: configObjectIdSchema,
  deletedInSyncEventId: connectorSyncEventIdSchema,
  deletedSourceRevisionRef: z.string().trim().min(1).max(255).nullable(),
  createdAt: z.string().datetime({ offset: true }),
}).meta({ ref: "PluginArchConnectorSourceTombstone" })
// Required GitHub App delivery headers, exposed under camelCased keys.
const requiredWebhookHeader = z.string().trim().min(1)
export const githubWebhookHeadersSchema = z.object({
  xHubSignature256: requiredWebhookHeader,
  xGithubEvent: githubWebhookEventSchema,
  xGithubDelivery: requiredWebhookHeader,
}).meta({ ref: "PluginArchGithubWebhookHeaders" })
// Loose model of a GitHub webhook payload: only the fields this service reads
// are declared; passthrough() preserves everything else GitHub sends.
export const githubWebhookPayloadSchema = z.object({
  // Head commit SHA of a push delivery.
  after: z.string().trim().min(1).optional(),
  installation: z.object({
    id: z.number().int().positive(),
  }).passthrough().optional(),
  // Git ref for push deliveries, e.g. "refs/heads/main".
  ref: z.string().trim().min(1).optional(),
  repository: z.object({
    full_name: z.string().trim().min(1),
    id: z.number().int().positive(),
  }).passthrough().optional(),
}).passthrough().meta({ ref: "PluginArchGithubWebhookPayload" })
// Normalized form of a verified webhook delivery: the interesting fields are
// hoisted out of the payload, and the full (passthrough) payload is retained.
export const githubWebhookEnvelopeSchema = z.object({
  deliveryId: z.string().trim().min(1),
  event: githubWebhookEventSchema,
  installationId: z.number().int().positive().optional(),
  repositoryId: z.number().int().positive().optional(),
  repositoryFullName: z.string().trim().min(1).optional(),
  ref: z.string().trim().min(1).optional(),
  headSha: z.string().trim().min(1).optional(),
  payload: githubWebhookPayloadSchema,
}).meta({ ref: "PluginArchGithubWebhookEnvelope" })
// Fully-resolved description of a queued GitHub sync job. Unlike the envelope,
// every field is required: by this point the delivery has been bound to a
// concrete connector instance/target and a sync event row.
export const githubConnectorSyncJobSchema = z.object({
  connectorType: z.literal("github"),
  connectorInstanceId: connectorInstanceIdSchema,
  connectorTargetId: connectorTargetIdSchema,
  connectorSyncEventId: connectorSyncEventIdSchema,
  deliveryId: z.string().trim().min(1),
  installationId: z.number().int().positive(),
  repositoryId: z.number().int().positive(),
  repositoryFullName: z.string().trim().min(1),
  ref: z.string().trim().min(1),
  headSha: z.string().trim().min(1),
}).meta({ ref: "PluginArchGithubConnectorSyncJob" })
// Raw request body exactly as received; the HMAC signature covers these bytes.
export const githubWebhookRawBodySchema = z.string().min(1).meta({ ref: "PluginArchGithubWebhookRawBody" })
// 202 response: delivery verified and actionable; `queued` reports whether sync work was enqueued.
export const githubWebhookAcceptedResponseSchema = z.object({
  ok: z.literal(true),
  accepted: z.literal(true),
  event: githubWebhookEventSchema,
  deliveryId: z.string().trim().min(1),
  queued: z.boolean(),
}).meta({ ref: "PluginArchGithubWebhookAcceptedResponse" })
// 200 response: signature valid but the delivery is not actionable (see `reason`).
export const githubWebhookIgnoredResponseSchema = z.object({
  ok: z.literal(true),
  accepted: z.literal(false),
  reason: z.string().trim().min(1),
}).meta({ ref: "PluginArchGithubWebhookIgnoredResponse" })
// 401 response: signature header missing or failed verification.
export const githubWebhookUnauthorizedResponseSchema = z.object({
  ok: z.literal(false),
  error: z.literal("invalid signature"),
}).meta({ ref: "PluginArchGithubWebhookUnauthorizedResponse" })
/**
 * Builds the paginated list envelope ({ items, nextCursor }) for a plugin-arch
 * resource. `ref` becomes the OpenAPI component name of the generated schema.
 */
export function pluginArchListResponseSchema<TSchema extends z.ZodTypeAny>(ref: string, itemSchema: TSchema) {
  const shape = {
    items: z.array(itemSchema),
    nextCursor: cursorSchema.nullable(),
  }
  return z.object(shape).meta({ ref })
}
/**
 * Builds the single-item detail envelope ({ item }) for a plugin-arch resource.
 * `ref` becomes the OpenAPI component name of the generated schema.
 */
export function pluginArchDetailResponseSchema<TSchema extends z.ZodTypeAny>(ref: string, itemSchema: TSchema) {
  const shape = { item: itemSchema }
  return z.object(shape).meta({ ref })
}
/**
 * Builds the mutation envelope ({ ok: true, item }) for a plugin-arch resource.
 * `ref` becomes the OpenAPI component name of the generated schema.
 */
export function pluginArchMutationResponseSchema<TSchema extends z.ZodTypeAny>(ref: string, itemSchema: TSchema) {
  const shape = {
    ok: z.literal(true),
    item: itemSchema,
  }
  return z.object(shape).meta({ ref })
}
/**
 * Builds the async-acknowledgement envelope ({ ok: true, queued: true, job })
 * returned when work is enqueued rather than completed inline.
 */
export function pluginArchAsyncResponseSchema<TSchema extends z.ZodTypeAny>(ref: string, jobSchema: TSchema) {
  const shape = {
    ok: z.literal(true),
    queued: z.literal(true),
    job: jobSchema,
  }
  return z.object(shape).meta({ ref })
}
// Concrete response envelopes, derived from the factories above.
// Config objects and their version history.
export const configObjectListResponseSchema = pluginArchListResponseSchema("PluginArchConfigObjectListResponse", configObjectSchema)
export const configObjectDetailResponseSchema = pluginArchDetailResponseSchema("PluginArchConfigObjectDetailResponse", configObjectSchema)
export const configObjectMutationResponseSchema = pluginArchMutationResponseSchema("PluginArchConfigObjectMutationResponse", configObjectSchema)
export const configObjectVersionListResponseSchema = pluginArchListResponseSchema("PluginArchConfigObjectVersionListResponse", configObjectVersionSchema)
export const configObjectVersionDetailResponseSchema = pluginArchDetailResponseSchema("PluginArchConfigObjectVersionDetailResponse", configObjectVersionSchema)
// Plugins and their config-object memberships.
export const pluginListResponseSchema = pluginArchListResponseSchema("PluginArchPluginListResponse", pluginSchema)
export const pluginDetailResponseSchema = pluginArchDetailResponseSchema("PluginArchPluginDetailResponse", pluginSchema)
export const pluginMutationResponseSchema = pluginArchMutationResponseSchema("PluginArchPluginMutationResponse", pluginSchema)
export const pluginMembershipListResponseSchema = pluginArchListResponseSchema("PluginArchPluginMembershipListResponse", pluginMembershipSchema)
export const pluginMembershipDetailResponseSchema = pluginArchDetailResponseSchema("PluginArchPluginMembershipDetailResponse", pluginMembershipSchema)
export const pluginMembershipMutationResponseSchema = pluginArchMutationResponseSchema("PluginArchPluginMembershipMutationResponse", pluginMembershipSchema)
// Access grants (no detail envelope — grants are only listed and mutated).
export const accessGrantListResponseSchema = pluginArchListResponseSchema("PluginArchAccessGrantListResponse", accessGrantSchema)
export const accessGrantMutationResponseSchema = pluginArchMutationResponseSchema("PluginArchAccessGrantMutationResponse", accessGrantSchema)
// Connector accounts, instances, targets, and mappings.
export const connectorAccountListResponseSchema = pluginArchListResponseSchema("PluginArchConnectorAccountListResponse", connectorAccountSchema)
export const connectorAccountDetailResponseSchema = pluginArchDetailResponseSchema("PluginArchConnectorAccountDetailResponse", connectorAccountSchema)
export const connectorAccountMutationResponseSchema = pluginArchMutationResponseSchema("PluginArchConnectorAccountMutationResponse", connectorAccountSchema)
export const connectorInstanceListResponseSchema = pluginArchListResponseSchema("PluginArchConnectorInstanceListResponse", connectorInstanceSchema)
export const connectorInstanceDetailResponseSchema = pluginArchDetailResponseSchema("PluginArchConnectorInstanceDetailResponse", connectorInstanceSchema)
export const connectorInstanceMutationResponseSchema = pluginArchMutationResponseSchema("PluginArchConnectorInstanceMutationResponse", connectorInstanceSchema)
export const connectorTargetListResponseSchema = pluginArchListResponseSchema("PluginArchConnectorTargetListResponse", connectorTargetSchema)
export const connectorTargetDetailResponseSchema = pluginArchDetailResponseSchema("PluginArchConnectorTargetDetailResponse", connectorTargetSchema)
export const connectorTargetMutationResponseSchema = pluginArchMutationResponseSchema("PluginArchConnectorTargetMutationResponse", connectorTargetSchema)
export const connectorMappingListResponseSchema = pluginArchListResponseSchema("PluginArchConnectorMappingListResponse", connectorMappingSchema)
export const connectorMappingMutationResponseSchema = pluginArchMutationResponseSchema("PluginArchConnectorMappingMutationResponse", connectorMappingSchema)
// Sync events; manual resync returns only the new sync-event id asynchronously.
export const connectorSyncEventListResponseSchema = pluginArchListResponseSchema("PluginArchConnectorSyncEventListResponse", connectorSyncEventSchema)
export const connectorSyncEventDetailResponseSchema = pluginArchDetailResponseSchema("PluginArchConnectorSyncEventDetailResponse", connectorSyncEventSchema)
export const connectorSyncAsyncResponseSchema = pluginArchAsyncResponseSchema(
  "PluginArchConnectorSyncAsyncResponse",
  z.object({ id: connectorSyncEventIdSchema }),
)
// Minimal repository projection used by the repository listing endpoint.
export const githubRepositorySchema = z.object({
  id: z.number().int().positive(),
  fullName: z.string().trim().min(1),
  // Nullable — presumably when the default branch is not known; confirm against the connector.
  defaultBranch: z.string().trim().min(1).nullable(),
  private: z.boolean(),
}).meta({ ref: "PluginArchGithubRepository" })
export const githubRepositoryListResponseSchema = pluginArchListResponseSchema("PluginArchGithubRepositoryListResponse", githubRepositorySchema)
// Setup creates/links the full connector chain in one call: account → instance → target.
export const githubSetupResponseSchema = pluginArchMutationResponseSchema(
  "PluginArchGithubSetupResponse",
  z.object({
    connectorAccount: connectorAccountSchema,
    connectorInstance: connectorInstanceSchema,
    connectorTarget: connectorTargetSchema,
  }),
)
// Validation reports reachability of the configured repository/branch without mutating state.
export const githubValidateTargetResponseSchema = pluginArchMutationResponseSchema(
  "PluginArchGithubValidateTargetResponse",
  z.object({
    branchExists: z.boolean(),
    defaultBranch: z.string().trim().min(1).nullable(),
    repositoryAccessible: z.boolean(),
  }),
)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,100 @@
import { createHmac, timingSafeEqual } from "node:crypto"
import type { Env, Hono } from "hono"
import { describeRoute } from "hono-openapi"
import { env } from "../../env.js"
import { emptyResponse, jsonResponse } from "../../openapi.js"
import { enqueueGithubWebhookSync } from "../org/plugin-system/store.js"
import {
githubWebhookAcceptedResponseSchema,
githubWebhookIgnoredResponseSchema,
githubWebhookUnauthorizedResponseSchema,
} from "../org/plugin-system/schemas.js"
import { pluginArchRoutePaths } from "../org/plugin-system/contracts.js"
/**
 * Computes the `sha256=<hex>` HMAC signature GitHub puts in the
 * x-hub-signature-256 header for the given raw request body.
 */
export function signGithubBody(rawBody: string, secret: string) {
  const digest = createHmac("sha256", secret).update(rawBody).digest("hex")
  return "sha256=" + digest
}
/**
 * Constant-time comparison of two signature strings.
 *
 * The length check comes first because timingSafeEqual requires equal-length
 * buffers; revealing a length mismatch is acceptable for fixed-length HMAC hex
 * signatures.
 */
export function safeCompareGithubSignature(received: string, expected: string) {
  const receivedBytes = Buffer.from(received, "utf8")
  const expectedBytes = Buffer.from(expected, "utf8")
  if (receivedBytes.length !== expectedBytes.length) {
    return false
  }
  return timingSafeEqual(receivedBytes, expectedBytes)
}
/**
 * Mounts the GitHub App webhook ingress route.
 *
 * Order of checks: secret configured → signature over the raw body → required
 * delivery headers → supported event type → payload parse → enqueue. Anything
 * that fails after signature verification responds with the "ignored" shape
 * (HTTP 200) so GitHub does not retry; bad signatures get 401.
 */
export function registerGithubWebhookRoutes<T extends Env>(app: Hono<T>) {
  app.post(
    pluginArchRoutePaths.githubWebhookIngress,
    describeRoute({
      tags: ["Webhooks"],
      summary: "GitHub webhook ingress",
      description: "Verifies a GitHub App webhook signature against the raw request body, then records any relevant sync work.",
      responses: {
        200: jsonResponse("Ignored but valid GitHub webhook delivery.", githubWebhookIgnoredResponseSchema),
        202: jsonResponse("Accepted GitHub webhook delivery.", githubWebhookAcceptedResponseSchema),
        401: jsonResponse("Invalid GitHub webhook signature.", githubWebhookUnauthorizedResponseSchema),
        503: emptyResponse("GitHub webhook secret is not configured."),
      },
    }),
    async (c) => {
      // Without a configured secret we cannot authenticate deliveries at all.
      const secret = env.githubConnectorApp.webhookSecret
      if (!secret) {
        return c.body(null, 503)
      }
      // The signature covers the raw bytes, so read the body before parsing it.
      const rawBody = await c.req.raw.text()
      const signature = c.req.raw.headers.get("x-hub-signature-256")?.trim() ?? ""
      if (!signature) {
        return c.json({ ok: false, error: "invalid signature" }, 401)
      }
      const expected = signGithubBody(rawBody, secret)
      if (!safeCompareGithubSignature(signature, expected)) {
        return c.json({ ok: false, error: "invalid signature" }, 401)
      }
      const event = c.req.raw.headers.get("x-github-event")?.trim() ?? ""
      const deliveryId = c.req.raw.headers.get("x-github-delivery")?.trim() ?? ""
      if (!event || !deliveryId) {
        return c.json({ ok: true, accepted: false, reason: "event ignored" }, 200)
      }
      // Only the event types the sync pipeline understands are processed.
      const normalizedEvent = event === "push" || event === "installation" || event === "installation_repositories" || event === "repository"
        ? event
        : null
      if (!normalizedEvent) {
        return c.json({ ok: true, accepted: false, reason: "event ignored" }, 200)
      }
      // Fix: JSON.parse was previously unguarded, so a correctly signed but
      // malformed body threw after verification and surfaced as a 500. Treat it
      // (and any non-object payload) as an ignorable delivery instead.
      let payload: Record<string, unknown>
      try {
        const parsed = JSON.parse(rawBody) as unknown
        if (!parsed || typeof parsed !== "object" || Array.isArray(parsed)) {
          return c.json({ ok: true, accepted: false, reason: "invalid payload" }, 200)
        }
        payload = parsed as Record<string, unknown>
      } catch {
        return c.json({ ok: true, accepted: false, reason: "invalid payload" }, 200)
      }
      // Pull out the handful of fields the sync pipeline keys on; all optional.
      const installationId = payload.installation && typeof payload.installation === "object" && typeof (payload.installation as Record<string, unknown>).id === "number"
        ? (payload.installation as Record<string, unknown>).id as number
        : undefined
      const repository = payload.repository && typeof payload.repository === "object" ? payload.repository as Record<string, unknown> : null
      const repositoryFullName = typeof repository?.full_name === "string" ? repository.full_name : undefined
      const repositoryId = typeof repository?.id === "number" ? repository.id : undefined
      const ref = typeof payload.ref === "string" ? payload.ref : undefined
      const headSha = typeof payload.after === "string" ? payload.after : undefined
      const accepted = await enqueueGithubWebhookSync({
        deliveryId,
        event: normalizedEvent,
        headSha,
        installationId,
        payload,
        ref,
        repositoryFullName,
        repositoryId,
      })
      if (!accepted.accepted) {
        return c.json({ ok: true, accepted: false, reason: accepted.reason }, 200)
      }
      return c.json({ ok: true, accepted: true, deliveryId, event: normalizedEvent, queued: accepted.queued }, 202)
    },
  )
}

View File

@@ -0,0 +1,6 @@
import type { Env, Hono } from "hono"
import { registerGithubWebhookRoutes } from "./github.js"
// Central mount point for all webhook ingress routes; register new providers here.
export function registerWebhookRoutes<T extends Env>(app: Hono<T>) {
  registerGithubWebhookRoutes(app)
}

View File

@@ -0,0 +1,91 @@
import { afterEach, beforeAll, expect, test } from "bun:test"
import { Hono } from "hono"
// Seeds the env vars the API modules validate at import time, without
// clobbering values the developer has already exported.
function seedRequiredEnv() {
  process.env.DATABASE_URL ??= "mysql://root:password@127.0.0.1:3306/openwork_test"
  process.env.DEN_DB_ENCRYPTION_KEY ??= "x".repeat(32)
  process.env.BETTER_AUTH_SECRET ??= "y".repeat(32)
  process.env.BETTER_AUTH_URL ??= "http://127.0.0.1:8790"
}
// Modules are imported lazily inside beforeAll (after seedRequiredEnv) because
// env.js validates process.env at import time.
let envModule: typeof import("../src/env.js")
let githubModule: typeof import("../src/routes/webhooks/github.js")
beforeAll(async () => {
  seedRequiredEnv()
  envModule = await import("../src/env.js")
  githubModule = await import("../src/routes/webhooks/github.js")
})
// Tests mutate the webhook secret in place; restore a known value after each one.
afterEach(() => {
  envModule.env.githubConnectorApp.webhookSecret = "super-secret"
})
// Builds a bare Hono app with only the GitHub webhook route mounted,
// so each test exercises the route in isolation.
function createWebhookApp() {
  const webhookApp = new Hono()
  githubModule.registerGithubWebhookRoutes(webhookApp)
  return webhookApp
}
// The body "{" is deliberately invalid JSON: a bad signature must be rejected
// before any parsing happens, so this request never reaches JSON.parse.
test("webhook route rejects invalid signatures before JSON parsing", async () => {
  envModule.env.githubConnectorApp.webhookSecret = "super-secret"
  const app = createWebhookApp()
  const response = await app.request("http://den.local/api/webhooks/connectors/github", {
    body: "{",
    headers: {
      "x-github-delivery": "delivery-1",
      "x-github-event": "push",
      "x-hub-signature-256": "sha256=wrong",
    },
    method: "POST",
  })
  expect(response.status).toBe(401)
  await expect(response.json()).resolves.toEqual({ ok: false, error: "invalid signature" })
})
// Without a configured secret the route cannot authenticate anything → 503.
test("webhook route returns 503 when the GitHub webhook secret is unset", async () => {
  envModule.env.githubConnectorApp.webhookSecret = undefined
  const app = createWebhookApp()
  const response = await app.request("http://den.local/api/webhooks/connectors/github", {
    body: "{}",
    headers: {
      "x-github-delivery": "delivery-2",
      "x-github-event": "push",
      "x-hub-signature-256": "sha256=unused",
    },
    method: "POST",
  })
  expect(response.status).toBe(503)
})
// A correctly signed push without an installation id passes verification but is
// ignored downstream — the route must answer 200/ignored, not error out.
test("webhook route accepts a valid signature and ignores unbound deliveries cleanly", async () => {
  envModule.env.githubConnectorApp.webhookSecret = "super-secret"
  const app = createWebhookApp()
  const payload = JSON.stringify({
    after: "abc123",
    ref: "refs/heads/main",
    repository: {
      full_name: "different-ai/openwork",
      id: 42,
    },
  })
  const response = await app.request("http://den.local/api/webhooks/connectors/github", {
    body: payload,
    headers: {
      "x-github-delivery": "delivery-3",
      "x-github-event": "push",
      // Sign with the real helper so the signature check passes.
      "x-hub-signature-256": githubModule.signGithubBody(payload, "super-secret"),
    },
    method: "POST",
  })
  expect(response.status).toBe(200)
  await expect(response.json()).resolves.toEqual({
    ok: true,
    accepted: false,
    reason: "missing installation id",
  })
})

View File

@@ -0,0 +1,90 @@
import { beforeAll, expect, test } from "bun:test"
// Seeds the env vars the API modules validate at import time (no-ops when the
// developer has already exported values).
function seedRequiredEnv() {
  process.env.DATABASE_URL = process.env.DATABASE_URL ?? "mysql://root:password@127.0.0.1:3306/openwork_test"
  process.env.DEN_DB_ENCRYPTION_KEY = process.env.DEN_DB_ENCRYPTION_KEY ?? "x".repeat(32)
  process.env.BETTER_AUTH_SECRET = process.env.BETTER_AUTH_SECRET ?? "y".repeat(32)
  process.env.BETTER_AUTH_URL = process.env.BETTER_AUTH_URL ?? "http://127.0.0.1:8790"
}
// Imported lazily so seedRequiredEnv runs before env validation.
let accessModule: typeof import("../src/routes/org/plugin-system/access.js")
beforeAll(async () => {
  seedRequiredEnv()
  accessModule = await import("../src/routes/org/plugin-system/access.js")
})
// Builds a minimal actor context for the access helpers.
// NOTE(review): the `as any` cast fills in only the fields access.js reads;
// confirm this stays in sync with the real actor-context type.
function createActorContext(input?: { isOwner?: boolean; role?: string; teamIds?: string[] }) {
  return {
    memberTeams: (input?.teamIds ?? []).map((teamId) => ({
      createdAt: new Date("2026-04-17T00:00:00.000Z"),
      id: teamId,
      name: teamId,
      organizationId: "org_test",
      updatedAt: new Date("2026-04-17T00:00:00.000Z"),
    })),
    organizationContext: {
      currentMember: {
        createdAt: new Date("2026-04-17T00:00:00.000Z"),
        id: "member_current",
        isOwner: input?.isOwner ?? false,
        role: input?.role ?? "member",
        userId: "user_current",
      },
    },
  } as any
}
// Admin detection must handle comma-separated role lists ("member,admin") as
// well as ownership; plain members get no plugin-arch capabilities.
test("org owners and admins get plugin-system capability access", () => {
  expect(accessModule.isPluginArchOrgAdmin(createActorContext({ isOwner: true }))).toBe(true)
  expect(accessModule.isPluginArchOrgAdmin(createActorContext({ role: "member,admin" }))).toBe(true)
  expect(accessModule.isPluginArchOrgAdmin(createActorContext({ role: "member" }))).toBe(false)
  expect(accessModule.hasPluginArchCapability(createActorContext({ isOwner: true }), "plugin.create")).toBe(true)
  expect(accessModule.hasPluginArchCapability(createActorContext({ role: "admin" }), "connector_instance.create")).toBe(true)
  expect(accessModule.hasPluginArchCapability(createActorContext({ role: "member" }), "config_object.create")).toBe(false)
})
// Three overlapping grants (org-wide viewer, team editor, direct manager):
// resolution picks the highest role among the grants that apply to the actor.
test("grant resolution supports direct, team, org-wide, and highest-role precedence", () => {
  const grants = [
    {
      orgMembershipId: null,
      orgWide: true,
      removedAt: null,
      role: "viewer",
      teamId: null,
    },
    {
      orgMembershipId: null,
      orgWide: false,
      removedAt: null,
      role: "editor",
      teamId: "team_alpha",
    },
    {
      orgMembershipId: "member_current",
      orgWide: false,
      removedAt: null,
      role: "manager",
      teamId: null,
    },
  ] as const
  expect(accessModule.resolvePluginArchGrantRole({ grants: [...grants], memberId: "member_current", teamIds: ["team_alpha"] })).toBe("manager")
  expect(accessModule.resolvePluginArchGrantRole({ grants: [...grants], memberId: "other_member", teamIds: ["team_alpha"] })).toBe("editor")
  expect(accessModule.resolvePluginArchGrantRole({ grants: [...grants], memberId: "other_member", teamIds: [] })).toBe("viewer")
})
// A soft-removed grant (removedAt set) must not confer any role.
test("removed grants are ignored during resolution", () => {
  expect(accessModule.resolvePluginArchGrantRole({
    grants: [{
      orgMembershipId: "member_current",
      orgWide: false,
      removedAt: new Date("2026-04-17T00:00:00.000Z"),
      role: "manager",
      teamId: null,
    }],
    memberId: "member_current",
    teamIds: [],
  })).toBeNull()
})

View File

@@ -0,0 +1,284 @@
-- Core catalog of configuration objects (skills, agents, commands, ...) owned
-- by an organization. Rows are soft-deleted via `status` plus `deleted_at`.
CREATE TABLE IF NOT EXISTS `config_object` (
`id` varchar(64) NOT NULL,
`organization_id` varchar(64) NOT NULL,
`object_type` enum('skill','agent','command','tool','mcp','hook','context','custom') NOT NULL,
`source_mode` enum('cloud','import','connector') NOT NULL,
`title` varchar(255) NOT NULL,
`description` text,
`search_text` text,
`current_file_name` varchar(255),
`current_file_extension` varchar(64),
`current_relative_path` varchar(2048),
`status` enum('active','inactive','deleted','archived','ingestion_error') NOT NULL DEFAULT 'active',
`created_by_org_membership_id` varchar(64) NOT NULL,
`connector_instance_id` varchar(64),
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`deleted_at` timestamp(3) NULL,
CONSTRAINT `config_object_id` PRIMARY KEY(`id`),
KEY `config_object_organization_id` (`organization_id`),
KEY `config_object_type` (`object_type`),
KEY `config_object_source_mode` (`source_mode`),
KEY `config_object_status` (`status`),
KEY `config_object_created_by_org_membership_id` (`created_by_org_membership_id`),
KEY `config_object_connector_instance_id` (`connector_instance_id`),
KEY `config_object_current_relative_path` (`current_relative_path`)
);
-- Append-only version history for a config object. The latest version per
-- object is resolved via the (config_object_id, created_at, id) index.
CREATE TABLE IF NOT EXISTS `config_object_version` (
`id` varchar(64) NOT NULL,
`config_object_id` varchar(64) NOT NULL,
`normalized_payload_json` text,
`raw_source_text` text,
`schema_version` varchar(100),
`created_via` enum('cloud','import','connector','system') NOT NULL,
`created_by_org_membership_id` varchar(64),
`connector_sync_event_id` varchar(64),
`source_revision_ref` varchar(255),
`is_deleted_version` boolean NOT NULL DEFAULT false,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
CONSTRAINT `config_object_version_id` PRIMARY KEY(`id`),
KEY `config_object_version_config_object_id` (`config_object_id`),
KEY `config_object_version_created_by_org_membership_id` (`created_by_org_membership_id`),
KEY `config_object_version_connector_sync_event_id` (`connector_sync_event_id`),
KEY `config_object_version_source_revision_ref` (`source_revision_ref`),
KEY `config_object_version_lookup_latest` (`config_object_id`, `created_at`, `id`)
);
-- A plugin is an org-scoped, soft-deletable bundle of config objects.
CREATE TABLE IF NOT EXISTS `plugin` (
`id` varchar(64) NOT NULL,
`organization_id` varchar(64) NOT NULL,
`name` varchar(255) NOT NULL,
`description` text,
`status` enum('active','inactive','deleted','archived') NOT NULL DEFAULT 'active',
`created_by_org_membership_id` varchar(64) NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`deleted_at` timestamp(3) NULL,
CONSTRAINT `plugin_id` PRIMARY KEY(`id`),
KEY `plugin_organization_id` (`organization_id`),
KEY `plugin_created_by_org_membership_id` (`created_by_org_membership_id`),
KEY `plugin_status` (`status`),
KEY `plugin_name` (`name`)
);
-- Join table: plugin membership for a config object, with provenance
-- (manual vs connector-driven) and soft removal via `removed_at`.
CREATE TABLE IF NOT EXISTS `plugin_config_object` (
`id` varchar(64) NOT NULL,
`plugin_id` varchar(64) NOT NULL,
`config_object_id` varchar(64) NOT NULL,
`membership_source` enum('manual','connector','api','system') NOT NULL DEFAULT 'manual',
`connector_mapping_id` varchar(64),
`created_by_org_membership_id` varchar(64),
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`removed_at` timestamp(3) NULL,
CONSTRAINT `plugin_config_object_id` PRIMARY KEY(`id`),
CONSTRAINT `plugin_config_object_plugin_config_object` UNIQUE(`plugin_id`, `config_object_id`),
KEY `plugin_config_object_plugin_id` (`plugin_id`),
KEY `plugin_config_object_config_object_id` (`config_object_id`),
KEY `plugin_config_object_connector_mapping_id` (`connector_mapping_id`)
);
-- Access grant for a single config object: exactly one of org_membership_id,
-- team_id, or org_wide identifies the grantee; unique per (object, grantee).
CREATE TABLE IF NOT EXISTS `config_object_access_grant` (
`id` varchar(64) NOT NULL,
`config_object_id` varchar(64) NOT NULL,
`org_membership_id` varchar(64),
`team_id` varchar(64),
`org_wide` boolean NOT NULL DEFAULT false,
`role` enum('viewer','editor','manager') NOT NULL,
`created_by_org_membership_id` varchar(64) NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`removed_at` timestamp(3) NULL,
CONSTRAINT `config_object_access_grant_id` PRIMARY KEY(`id`),
CONSTRAINT `config_object_access_grant_object_org_membership` UNIQUE(`config_object_id`, `org_membership_id`),
CONSTRAINT `config_object_access_grant_object_team` UNIQUE(`config_object_id`, `team_id`),
KEY `config_object_access_grant_config_object_id` (`config_object_id`),
KEY `config_object_access_grant_org_membership_id` (`org_membership_id`),
KEY `config_object_access_grant_team_id` (`team_id`),
KEY `config_object_access_grant_org_wide` (`org_wide`)
);
-- Same grant structure as above, scoped to a plugin instead of an object.
CREATE TABLE IF NOT EXISTS `plugin_access_grant` (
`id` varchar(64) NOT NULL,
`plugin_id` varchar(64) NOT NULL,
`org_membership_id` varchar(64),
`team_id` varchar(64),
`org_wide` boolean NOT NULL DEFAULT false,
`role` enum('viewer','editor','manager') NOT NULL,
`created_by_org_membership_id` varchar(64) NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`removed_at` timestamp(3) NULL,
CONSTRAINT `plugin_access_grant_id` PRIMARY KEY(`id`),
CONSTRAINT `plugin_access_grant_plugin_org_membership` UNIQUE(`plugin_id`, `org_membership_id`),
CONSTRAINT `plugin_access_grant_plugin_team` UNIQUE(`plugin_id`, `team_id`),
KEY `plugin_access_grant_plugin_id` (`plugin_id`),
KEY `plugin_access_grant_org_membership_id` (`org_membership_id`),
KEY `plugin_access_grant_team_id` (`team_id`),
KEY `plugin_access_grant_org_wide` (`org_wide`)
);
-- External provider account (currently only GitHub); remote_id is unique per
-- organization and connector type, which scopes records to their org.
CREATE TABLE IF NOT EXISTS `connector_account` (
`id` varchar(64) NOT NULL,
`organization_id` varchar(64) NOT NULL,
`connector_type` enum('github') NOT NULL,
`remote_id` varchar(255) NOT NULL,
`external_account_ref` varchar(255),
`display_name` varchar(255) NOT NULL,
`status` enum('active','inactive','disconnected','error') NOT NULL DEFAULT 'active',
`created_by_org_membership_id` varchar(64) NOT NULL,
`metadata_json` json,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `connector_account_id` PRIMARY KEY(`id`),
CONSTRAINT `connector_account_org_type_remote_id` UNIQUE(`organization_id`, `connector_type`, `remote_id`),
KEY `connector_account_organization_id` (`organization_id`),
KEY `connector_account_created_by_org_membership_id` (`created_by_org_membership_id`),
KEY `connector_account_connector_type` (`connector_type`),
KEY `connector_account_status` (`status`)
);
-- A configured connector under an account, with per-instance sync bookkeeping
-- (last cursor/status). Names are unique per organization.
CREATE TABLE IF NOT EXISTS `connector_instance` (
`id` varchar(64) NOT NULL,
`organization_id` varchar(64) NOT NULL,
`connector_account_id` varchar(64) NOT NULL,
`connector_type` enum('github') NOT NULL,
`remote_id` varchar(255),
`name` varchar(255) NOT NULL,
`status` enum('active','disabled','archived','error') NOT NULL DEFAULT 'active',
`instance_config_json` json,
`last_synced_at` timestamp(3) NULL,
`last_sync_status` enum('pending','queued','running','completed','failed','partial','ignored'),
`last_sync_cursor` text,
`created_by_org_membership_id` varchar(64) NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `connector_instance_id` PRIMARY KEY(`id`),
CONSTRAINT `connector_instance_org_name` UNIQUE(`organization_id`, `name`),
KEY `connector_instance_organization_id` (`organization_id`),
KEY `connector_instance_connector_account_id` (`connector_account_id`),
KEY `connector_instance_created_by_org_membership_id` (`created_by_org_membership_id`),
KEY `connector_instance_connector_type` (`connector_type`),
KEY `connector_instance_status` (`status`)
);
-- Access grant scoped to a connector instance; same grantee/uniqueness pattern
-- as the object and plugin grant tables.
CREATE TABLE IF NOT EXISTS `connector_instance_access_grant` (
`id` varchar(64) NOT NULL,
`connector_instance_id` varchar(64) NOT NULL,
`org_membership_id` varchar(64),
`team_id` varchar(64),
`org_wide` boolean NOT NULL DEFAULT false,
`role` enum('viewer','editor','manager') NOT NULL,
`created_by_org_membership_id` varchar(64) NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`removed_at` timestamp(3) NULL,
CONSTRAINT `connector_instance_access_grant_id` PRIMARY KEY(`id`),
CONSTRAINT `connector_instance_access_grant_instance_org_membership` UNIQUE(`connector_instance_id`, `org_membership_id`),
CONSTRAINT `connector_instance_access_grant_instance_team` UNIQUE(`connector_instance_id`, `team_id`),
KEY `connector_instance_access_grant_instance_id` (`connector_instance_id`),
KEY `connector_instance_access_grant_org_membership_id` (`org_membership_id`),
KEY `connector_instance_access_grant_team_id` (`team_id`),
KEY `connector_instance_access_grant_org_wide` (`org_wide`)
);
-- Concrete sync target under an instance (currently a repository branch),
-- unique per (instance, remote_id).
CREATE TABLE IF NOT EXISTS `connector_target` (
`id` varchar(64) NOT NULL,
`connector_instance_id` varchar(64) NOT NULL,
`connector_type` enum('github') NOT NULL,
`remote_id` varchar(255) NOT NULL,
`target_kind` enum('repository_branch') NOT NULL,
`external_target_ref` varchar(255),
`target_config_json` json NOT NULL,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `connector_target_id` PRIMARY KEY(`id`),
CONSTRAINT `connector_target_instance_remote_id` UNIQUE(`connector_instance_id`, `remote_id`),
KEY `connector_target_connector_instance_id` (`connector_instance_id`),
KEY `connector_target_connector_type` (`connector_type`),
KEY `connector_target_target_kind` (`target_kind`)
);
-- Ingestion rule for a target: `selector` (varchar(1024) — the zod contract
-- should accept up to this length) maps remote content to an object type.
CREATE TABLE IF NOT EXISTS `connector_mapping` (
`id` varchar(64) NOT NULL,
`connector_instance_id` varchar(64) NOT NULL,
`connector_target_id` varchar(64) NOT NULL,
`connector_type` enum('github') NOT NULL,
`remote_id` varchar(255),
`mapping_kind` enum('path','api','custom') NOT NULL,
`selector` varchar(1024) NOT NULL,
`object_type` enum('skill','agent','command','tool','mcp','hook','context','custom') NOT NULL,
`plugin_id` varchar(64),
`auto_add_to_plugin` boolean NOT NULL DEFAULT false,
`mapping_config_json` json,
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
CONSTRAINT `connector_mapping_id` PRIMARY KEY(`id`),
CONSTRAINT `connector_mapping_target_selector_object_type` UNIQUE(`connector_target_id`, `selector`, `object_type`),
KEY `connector_mapping_connector_instance_id` (`connector_instance_id`),
KEY `connector_mapping_connector_target_id` (`connector_target_id`),
KEY `connector_mapping_object_type` (`object_type`),
KEY `connector_mapping_plugin_id` (`plugin_id`)
);
-- One sync attempt; `started_at` defaults to creation time and `completed_at`
-- stays NULL until the run finishes.
CREATE TABLE IF NOT EXISTS `connector_sync_event` (
`id` varchar(64) NOT NULL,
`connector_instance_id` varchar(64) NOT NULL,
`connector_target_id` varchar(64),
`connector_type` enum('github') NOT NULL,
`remote_id` varchar(255),
`event_type` enum('push','installation','installation_repositories','repository','manual_resync') NOT NULL,
`external_event_ref` varchar(255),
`source_revision_ref` varchar(255),
`status` enum('pending','queued','running','completed','failed','partial','ignored') NOT NULL DEFAULT 'pending',
`summary_json` json,
`started_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`completed_at` timestamp(3) NULL,
CONSTRAINT `connector_sync_event_id` PRIMARY KEY(`id`),
KEY `connector_sync_event_connector_instance_id` (`connector_instance_id`),
KEY `connector_sync_event_connector_target_id` (`connector_target_id`),
KEY `connector_sync_event_event_type` (`event_type`),
KEY `connector_sync_event_status` (`status`),
KEY `connector_sync_event_source_revision_ref` (`source_revision_ref`),
KEY `connector_sync_event_external_event_ref` (`external_event_ref`)
);
-- One-to-one link (unique on config_object_id) between a config object and its
-- remote source location; `external_locator` is varchar(2048).
CREATE TABLE IF NOT EXISTS `connector_source_binding` (
`id` varchar(64) NOT NULL,
`config_object_id` varchar(64) NOT NULL,
`connector_instance_id` varchar(64) NOT NULL,
`connector_target_id` varchar(64) NOT NULL,
`connector_mapping_id` varchar(64) NOT NULL,
`connector_type` enum('github') NOT NULL,
`remote_id` varchar(255),
`external_locator` varchar(2048) NOT NULL,
`external_stable_ref` varchar(255),
`last_seen_source_revision_ref` varchar(255),
`status` enum('active','inactive','deleted','archived','ingestion_error') NOT NULL DEFAULT 'active',
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
`updated_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3),
`deleted_at` timestamp(3) NULL,
CONSTRAINT `connector_source_binding_id` PRIMARY KEY(`id`),
CONSTRAINT `connector_source_binding_config_object` UNIQUE(`config_object_id`),
KEY `connector_source_binding_connector_instance_id` (`connector_instance_id`),
KEY `connector_source_binding_connector_target_id` (`connector_target_id`),
KEY `connector_source_binding_connector_mapping_id` (`connector_mapping_id`),
KEY `connector_source_binding_external_locator` (`external_locator`)
);
-- Provenance record for remote content deleted upstream, keyed back to the
-- sync event that observed the deletion.
CREATE TABLE IF NOT EXISTS `connector_source_tombstone` (
`id` varchar(64) NOT NULL,
`connector_instance_id` varchar(64) NOT NULL,
`connector_target_id` varchar(64) NOT NULL,
`connector_mapping_id` varchar(64) NOT NULL,
`connector_type` enum('github') NOT NULL,
`remote_id` varchar(255),
`external_locator` varchar(2048) NOT NULL,
`former_config_object_id` varchar(64) NOT NULL,
`deleted_in_sync_event_id` varchar(64) NOT NULL,
`deleted_source_revision_ref` varchar(255),
`created_at` timestamp(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
CONSTRAINT `connector_source_tombstone_id` PRIMARY KEY(`id`),
KEY `connector_source_tombstone_connector_instance_id` (`connector_instance_id`),
KEY `connector_source_tombstone_connector_target_id` (`connector_target_id`),
KEY `connector_source_tombstone_connector_mapping_id` (`connector_mapping_id`),
KEY `connector_source_tombstone_external_locator` (`external_locator`),
KEY `connector_source_tombstone_former_config_object_id` (`former_config_object_id`)
);

View File

@@ -64,6 +64,13 @@
"when": 1775350000000,
"tag": "0009_api_keys",
"breakpoints": true
},
{
"idx": 10,
"version": "5",
"when": 1776427000000,
"tag": "0010_plugin_arch",
"breakpoints": true
}
]
}

View File

@@ -1,6 +1,7 @@
// Schema barrel: re-exports every table/relations module so consumers can
// import the whole database schema from a single entry point.
export * from "./auth"
export * from "./org"
export * from "./sharables/llm-providers"
export * from "./sharables/plugin-arch"
export * from "./sharables/skills"
export * from "./teams"
export * from "./workers"

View File

@@ -0,0 +1,656 @@
import { relations, sql } from "drizzle-orm"
import {
boolean,
index,
json,
mysqlEnum,
mysqlTable,
text,
timestamp,
uniqueIndex,
varchar,
} from "drizzle-orm/mysql-core"
import { denTypeIdColumn, encryptedColumn, encryptedTextColumn } from "../../columns"
import { MemberTable, OrganizationTable } from "../org"
import { TeamTable } from "../teams"
// Literal value sets backing the mysqlEnum columns defined below. Exported
// `as const` so derived TS union types stay in sync with the DB enums.
// Config object taxonomy, provenance, and lifecycle.
export const configObjectTypeValues = ["skill", "agent", "command", "tool", "mcp", "hook", "context", "custom"] as const
export const configObjectSourceModeValues = ["cloud", "import", "connector"] as const
export const configObjectStatusValues = ["active", "inactive", "deleted", "archived", "ingestion_error"] as const
export const configObjectCreatedViaValues = ["cloud", "import", "connector", "system"] as const
// Plugin lifecycle and how a config object joined a plugin.
export const pluginStatusValues = ["active", "inactive", "deleted", "archived"] as const
export const membershipSourceValues = ["manual", "connector", "api", "system"] as const
// Roles used by all access-grant tables in this module.
export const accessRoleValues = ["viewer", "editor", "manager"] as const
// Connector domain: GitHub is the only connector type so far.
export const connectorTypeValues = ["github"] as const
export const connectorAccountStatusValues = ["active", "inactive", "disconnected", "error"] as const
export const connectorInstanceStatusValues = ["active", "disabled", "archived", "error"] as const
export const connectorTargetKindValues = ["repository_branch"] as const
export const connectorMappingKindValues = ["path", "api", "custom"] as const
// Sync event taxonomy mirrors GitHub webhook event names plus manual resync.
export const connectorSyncEventTypeValues = ["push", "installation", "installation_repositories", "repository", "manual_resync"] as const
export const connectorSyncStatusValues = ["pending", "queued", "running", "completed", "failed", "partial", "ignored"] as const
/**
 * Builds an encrypted column whose value is stored as a JSON string.
 * The payload is serialized with JSON.stringify before encryption and
 * re-hydrated with JSON.parse after decryption.
 */
function encryptedJsonColumn<TData extends Record<string, unknown> | Array<unknown> | null>(columnName: string) {
  const toText = (data: TData): string => JSON.stringify(data)
  const fromText = (raw: string): TData => JSON.parse(raw) as TData
  return encryptedColumn<TData>(columnName, {
    deserialize: fromText,
    serialize: toText,
  })
}
// A single configuration artifact (skill, agent, command, ...) owned by an
// organization. `sourceMode`/`connectorInstanceId` record whether it was
// created in-app, imported, or ingested by a connector.
export const ConfigObjectTable = mysqlTable(
"config_object",
{
id: denTypeIdColumn("configObject", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
objectType: mysqlEnum("object_type", configObjectTypeValues).notNull(),
sourceMode: mysqlEnum("source_mode", configObjectSourceModeValues).notNull(),
title: varchar("title", { length: 255 }).notNull(),
description: text("description"),
searchText: text("search_text"),
// Denormalized file metadata from the most recent ingest/import.
currentFileName: varchar("current_file_name", { length: 255 }),
currentFileExtension: varchar("current_file_extension", { length: 64 }),
currentRelativePath: varchar("current_relative_path", { length: 255 }),
status: mysqlEnum("status", configObjectStatusValues).notNull().default("active"),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id").notNull(),
connectorInstanceId: denTypeIdColumn("connectorInstance", "connector_instance_id"),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
// DB-side auto-touch on every UPDATE.
updatedAt: timestamp("updated_at", { fsp: 3 }).notNull().default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
deletedAt: timestamp("deleted_at", { fsp: 3 }),
},
(table) => [
index("config_object_organization_id").on(table.organizationId),
index("config_object_type").on(table.objectType),
index("config_object_source_mode").on(table.sourceMode),
index("config_object_status").on(table.status),
index("config_object_created_by_org_membership_id").on(table.createdByOrgMembershipId),
index("config_object_connector_instance_id").on(table.connectorInstanceId),
index("config_object_current_relative_path").on(table.currentRelativePath),
],
)
// Immutable version history for a config object. Payload and raw text are
// stored encrypted; `isDeletedVersion` marks a version that represents a
// deletion rather than content.
export const ConfigObjectVersionTable = mysqlTable(
"config_object_version",
{
id: denTypeIdColumn("configObjectVersion", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
configObjectId: denTypeIdColumn("configObject", "config_object_id").notNull(),
normalizedPayloadJson: encryptedJsonColumn<Record<string, unknown> | null>("normalized_payload_json"),
rawSourceText: encryptedTextColumn("raw_source_text"),
schemaVersion: varchar("schema_version", { length: 100 }),
createdVia: mysqlEnum("created_via", configObjectCreatedViaValues).notNull(),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id"),
// Set when the version was produced by a connector sync.
connectorSyncEventId: denTypeIdColumn("connectorSyncEvent", "connector_sync_event_id"),
sourceRevisionRef: varchar("source_revision_ref", { length: 255 }),
isDeletedVersion: boolean("is_deleted_version").notNull().default(false),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
},
(table) => [
index("config_object_version_organization_id").on(table.organizationId),
index("config_object_version_config_object_id").on(table.configObjectId),
index("config_object_version_created_by_org_membership_id").on(table.createdByOrgMembershipId),
index("config_object_version_connector_sync_event_id").on(table.connectorSyncEventId),
index("config_object_version_source_revision_ref").on(table.sourceRevisionRef),
// Composite index for "latest version of object" queries.
index("config_object_version_lookup_latest").on(table.configObjectId, table.createdAt, table.id),
],
)
// A named, org-scoped bundle of config objects (see PluginConfigObjectTable
// for membership).
export const PluginTable = mysqlTable(
"plugin",
{
id: denTypeIdColumn("plugin", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
name: varchar("name", { length: 255 }).notNull(),
description: text("description"),
status: mysqlEnum("status", pluginStatusValues).notNull().default("active"),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id").notNull(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 }).notNull().default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
deletedAt: timestamp("deleted_at", { fsp: 3 }),
},
(table) => [
index("plugin_organization_id").on(table.organizationId),
index("plugin_created_by_org_membership_id").on(table.createdByOrgMembershipId),
index("plugin_status").on(table.status),
index("plugin_name").on(table.name),
],
)
// Join table: which config objects belong to which plugin. `membershipSource`
// records how the row came to exist (manual, connector auto-add, api, system);
// `removedAt` soft-removes without deleting history.
export const PluginConfigObjectTable = mysqlTable(
"plugin_config_object",
{
id: denTypeIdColumn("pluginConfigObject", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
pluginId: denTypeIdColumn("plugin", "plugin_id").notNull(),
configObjectId: denTypeIdColumn("configObject", "config_object_id").notNull(),
membershipSource: mysqlEnum("membership_source", membershipSourceValues).notNull().default("manual"),
connectorMappingId: denTypeIdColumn("connectorMapping", "connector_mapping_id"),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id"),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
removedAt: timestamp("removed_at", { fsp: 3 }),
},
(table) => [
index("plugin_config_object_organization_id").on(table.organizationId),
index("plugin_config_object_plugin_id").on(table.pluginId),
index("plugin_config_object_config_object_id").on(table.configObjectId),
index("plugin_config_object_connector_mapping_id").on(table.connectorMappingId),
// At most one membership row per (plugin, object) pair.
uniqueIndex("plugin_config_object_plugin_config_object").on(table.pluginId, table.configObjectId),
],
)
// Access grant on a single config object. Exactly one principal per row:
// a member (orgMembershipId), a team (teamId), or the whole org (orgWide).
export const ConfigObjectAccessGrantTable = mysqlTable(
"config_object_access_grant",
{
id: denTypeIdColumn("configObjectAccessGrant", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
configObjectId: denTypeIdColumn("configObject", "config_object_id").notNull(),
orgMembershipId: denTypeIdColumn("member", "org_membership_id"),
teamId: denTypeIdColumn("team", "team_id"),
orgWide: boolean("org_wide").notNull().default(false),
role: mysqlEnum("role", accessRoleValues).notNull(),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id").notNull(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
removedAt: timestamp("removed_at", { fsp: 3 }),
},
(table) => [
index("config_object_access_grant_organization_id").on(table.organizationId),
index("config_object_access_grant_config_object_id").on(table.configObjectId),
index("config_object_access_grant_org_membership_id").on(table.orgMembershipId),
index("config_object_access_grant_team_id").on(table.teamId),
index("config_object_access_grant_org_wide").on(table.orgWide),
// MySQL unique indexes permit multiple NULLs, so rows for other principal
// kinds (NULL membership/team) do not collide.
uniqueIndex("config_object_access_grant_object_org_membership").on(table.configObjectId, table.orgMembershipId),
uniqueIndex("config_object_access_grant_object_team").on(table.configObjectId, table.teamId),
],
)
// Access grant on a plugin; same principal model as
// ConfigObjectAccessGrantTable (member, team, or org-wide).
export const PluginAccessGrantTable = mysqlTable(
"plugin_access_grant",
{
id: denTypeIdColumn("pluginAccessGrant", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
pluginId: denTypeIdColumn("plugin", "plugin_id").notNull(),
orgMembershipId: denTypeIdColumn("member", "org_membership_id"),
teamId: denTypeIdColumn("team", "team_id"),
orgWide: boolean("org_wide").notNull().default(false),
role: mysqlEnum("role", accessRoleValues).notNull(),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id").notNull(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
removedAt: timestamp("removed_at", { fsp: 3 }),
},
(table) => [
index("plugin_access_grant_organization_id").on(table.organizationId),
index("plugin_access_grant_plugin_id").on(table.pluginId),
index("plugin_access_grant_org_membership_id").on(table.orgMembershipId),
index("plugin_access_grant_team_id").on(table.teamId),
index("plugin_access_grant_org_wide").on(table.orgWide),
uniqueIndex("plugin_access_grant_plugin_org_membership").on(table.pluginId, table.orgMembershipId),
uniqueIndex("plugin_access_grant_plugin_team").on(table.pluginId, table.teamId),
],
)
// An org's connection to an external account (e.g. a GitHub App
// installation). `remoteId` is the provider-side identifier, unique per
// (org, connector type).
export const ConnectorAccountTable = mysqlTable(
"connector_account",
{
id: denTypeIdColumn("connectorAccount", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
connectorType: mysqlEnum("connector_type", connectorTypeValues).notNull(),
remoteId: varchar("remote_id", { length: 255 }).notNull(),
externalAccountRef: varchar("external_account_ref", { length: 255 }),
displayName: varchar("display_name", { length: 255 }).notNull(),
status: mysqlEnum("status", connectorAccountStatusValues).notNull().default("active"),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id").notNull(),
metadataJson: json("metadata_json").$type<Record<string, unknown> | null>(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 }).notNull().default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
},
(table) => [
index("connector_account_organization_id").on(table.organizationId),
index("connector_account_created_by_org_membership_id").on(table.createdByOrgMembershipId),
index("connector_account_connector_type").on(table.connectorType),
index("connector_account_status").on(table.status),
uniqueIndex("connector_account_org_type_remote_id").on(table.organizationId, table.connectorType, table.remoteId),
],
)
// A configured connector under an account. Tracks last-sync bookkeeping
// (timestamp, status, cursor) used by the sync pipeline. Instance names are
// unique per organization.
export const ConnectorInstanceTable = mysqlTable(
"connector_instance",
{
id: denTypeIdColumn("connectorInstance", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
connectorAccountId: denTypeIdColumn("connectorAccount", "connector_account_id").notNull(),
connectorType: mysqlEnum("connector_type", connectorTypeValues).notNull(),
remoteId: varchar("remote_id", { length: 255 }),
name: varchar("name", { length: 255 }).notNull(),
status: mysqlEnum("status", connectorInstanceStatusValues).notNull().default("active"),
instanceConfigJson: json("instance_config_json").$type<Record<string, unknown> | null>(),
lastSyncedAt: timestamp("last_synced_at", { fsp: 3 }),
lastSyncStatus: mysqlEnum("last_sync_status", connectorSyncStatusValues),
lastSyncCursor: text("last_sync_cursor"),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id").notNull(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 }).notNull().default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
},
(table) => [
index("connector_instance_organization_id").on(table.organizationId),
index("connector_instance_connector_account_id").on(table.connectorAccountId),
index("connector_instance_created_by_org_membership_id").on(table.createdByOrgMembershipId),
index("connector_instance_connector_type").on(table.connectorType),
index("connector_instance_status").on(table.status),
uniqueIndex("connector_instance_org_name").on(table.organizationId, table.name),
],
)
// Access grant on a connector instance; same principal model as the other
// *_access_grant tables (member, team, or org-wide).
export const ConnectorInstanceAccessGrantTable = mysqlTable(
"connector_instance_access_grant",
{
id: denTypeIdColumn("connectorInstanceAccessGrant", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
connectorInstanceId: denTypeIdColumn("connectorInstance", "connector_instance_id").notNull(),
orgMembershipId: denTypeIdColumn("member", "org_membership_id"),
teamId: denTypeIdColumn("team", "team_id"),
orgWide: boolean("org_wide").notNull().default(false),
role: mysqlEnum("role", accessRoleValues).notNull(),
createdByOrgMembershipId: denTypeIdColumn("member", "created_by_org_membership_id").notNull(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
removedAt: timestamp("removed_at", { fsp: 3 }),
},
(table) => [
index("connector_instance_access_grant_organization_id").on(table.organizationId),
index("connector_instance_access_grant_instance_id").on(table.connectorInstanceId),
index("connector_instance_access_grant_org_membership_id").on(table.orgMembershipId),
index("connector_instance_access_grant_team_id").on(table.teamId),
index("connector_instance_access_grant_org_wide").on(table.orgWide),
uniqueIndex("connector_instance_access_grant_instance_org_membership").on(table.connectorInstanceId, table.orgMembershipId),
uniqueIndex("connector_instance_access_grant_instance_team").on(table.connectorInstanceId, table.teamId),
],
)
// A concrete sync target inside a connector instance — currently only
// `repository_branch` (see connectorTargetKindValues). `remoteId` is unique
// per instance.
export const ConnectorTargetTable = mysqlTable(
"connector_target",
{
id: denTypeIdColumn("connectorTarget", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
connectorInstanceId: denTypeIdColumn("connectorInstance", "connector_instance_id").notNull(),
connectorType: mysqlEnum("connector_type", connectorTypeValues).notNull(),
remoteId: varchar("remote_id", { length: 255 }).notNull(),
targetKind: mysqlEnum("target_kind", connectorTargetKindValues).notNull(),
externalTargetRef: varchar("external_target_ref", { length: 255 }),
targetConfigJson: json("target_config_json").$type<Record<string, unknown>>().notNull(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 }).notNull().default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
},
(table) => [
index("connector_target_organization_id").on(table.organizationId),
index("connector_target_connector_instance_id").on(table.connectorInstanceId),
index("connector_target_connector_type").on(table.connectorType),
index("connector_target_target_kind").on(table.targetKind),
uniqueIndex("connector_target_instance_remote_id").on(table.connectorInstanceId, table.remoteId),
],
)
// Maps a selector within a target (e.g. a repo path pattern) to a config
// object type, optionally auto-adding ingested objects to a plugin.
// (target, selector, objectType) is unique.
export const ConnectorMappingTable = mysqlTable(
"connector_mapping",
{
id: denTypeIdColumn("connectorMapping", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
connectorInstanceId: denTypeIdColumn("connectorInstance", "connector_instance_id").notNull(),
connectorTargetId: denTypeIdColumn("connectorTarget", "connector_target_id").notNull(),
connectorType: mysqlEnum("connector_type", connectorTypeValues).notNull(),
remoteId: varchar("remote_id", { length: 255 }),
mappingKind: mysqlEnum("mapping_kind", connectorMappingKindValues).notNull(),
selector: varchar("selector", { length: 255 }).notNull(),
objectType: mysqlEnum("object_type", configObjectTypeValues).notNull(),
pluginId: denTypeIdColumn("plugin", "plugin_id"),
autoAddToPlugin: boolean("auto_add_to_plugin").notNull().default(false),
mappingConfigJson: json("mapping_config_json").$type<Record<string, unknown> | null>(),
createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { fsp: 3 }).notNull().default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
},
(table) => [
index("connector_mapping_organization_id").on(table.organizationId),
index("connector_mapping_connector_instance_id").on(table.connectorInstanceId),
index("connector_mapping_connector_target_id").on(table.connectorTargetId),
index("connector_mapping_object_type").on(table.objectType),
index("connector_mapping_plugin_id").on(table.pluginId),
uniqueIndex("connector_mapping_target_selector_object_type").on(table.connectorTargetId, table.selector, table.objectType),
],
)
// One sync run (webhook-triggered or manual). Status moves through
// connectorSyncStatusValues; `summaryJson` holds a result summary and
// `externalEventRef` ties back to the provider's delivery id.
export const ConnectorSyncEventTable = mysqlTable(
"connector_sync_event",
{
id: denTypeIdColumn("connectorSyncEvent", "id").notNull().primaryKey(),
organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
connectorInstanceId: denTypeIdColumn("connectorInstance", "connector_instance_id").notNull(),
connectorTargetId: denTypeIdColumn("connectorTarget", "connector_target_id"),
connectorType: mysqlEnum("connector_type", connectorTypeValues).notNull(),
remoteId: varchar("remote_id", { length: 255 }),
eventType: mysqlEnum("event_type", connectorSyncEventTypeValues).notNull(),
externalEventRef: varchar("external_event_ref", { length: 255 }),
sourceRevisionRef: varchar("source_revision_ref", { length: 255 }),
status: mysqlEnum("status", connectorSyncStatusValues).notNull().default("pending"),
summaryJson: json("summary_json").$type<Record<string, unknown> | null>(),
startedAt: timestamp("started_at", { fsp: 3 }).notNull().defaultNow(),
completedAt: timestamp("completed_at", { fsp: 3 }),
},
(table) => [
index("connector_sync_event_organization_id").on(table.organizationId),
index("connector_sync_event_connector_instance_id").on(table.connectorInstanceId),
index("connector_sync_event_connector_target_id").on(table.connectorTargetId),
index("connector_sync_event_event_type").on(table.eventType),
index("connector_sync_event_status").on(table.status),
index("connector_sync_event_source_revision_ref").on(table.sourceRevisionRef),
index("connector_sync_event_external_event_ref").on(table.externalEventRef),
],
)
// Binds a config object to the external source artifact it was ingested
// from. Exactly one binding per config object (unique on config_object_id).
export const ConnectorSourceBindingTable = mysqlTable(
  "connector_source_binding",
  {
    id: denTypeIdColumn("connectorSourceBinding", "id").notNull().primaryKey(),
    organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
    configObjectId: denTypeIdColumn("configObject", "config_object_id").notNull(),
    connectorInstanceId: denTypeIdColumn("connectorInstance", "connector_instance_id").notNull(),
    connectorTargetId: denTypeIdColumn("connectorTarget", "connector_target_id").notNull(),
    connectorMappingId: denTypeIdColumn("connectorMapping", "connector_mapping_id").notNull(),
    connectorType: mysqlEnum("connector_type", connectorTypeValues).notNull(),
    remoteId: varchar("remote_id", { length: 255 }),
    // Path/URL of the source artifact. Length 2048 matches the generated
    // migration (`external_locator` varchar(2048)); it was 255 here, which
    // would make drizzle report drift against the applied DDL and could
    // truncate long repo paths.
    // NOTE(review): a plain KEY on a 2048-char utf8mb4 column can exceed
    // MySQL's 3072-byte index-key limit — confirm the target DB accepts it.
    externalLocator: varchar("external_locator", { length: 2048 }).notNull(),
    externalStableRef: varchar("external_stable_ref", { length: 255 }),
    lastSeenSourceRevisionRef: varchar("last_seen_source_revision_ref", { length: 255 }),
    status: mysqlEnum("status", configObjectStatusValues).notNull().default("active"),
    createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
    updatedAt: timestamp("updated_at", { fsp: 3 }).notNull().default(sql`CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3)`),
    deletedAt: timestamp("deleted_at", { fsp: 3 }),
  },
  (table) => [
    index("connector_source_binding_organization_id").on(table.organizationId),
    // NOTE(review): plain index on configObjectId is redundant with the
    // unique index below and is absent from the generated DDL — confirm.
    index("connector_source_binding_config_object_id").on(table.configObjectId),
    index("connector_source_binding_connector_instance_id").on(table.connectorInstanceId),
    index("connector_source_binding_connector_target_id").on(table.connectorTargetId),
    index("connector_source_binding_connector_mapping_id").on(table.connectorMappingId),
    index("connector_source_binding_external_locator").on(table.externalLocator),
    uniqueIndex("connector_source_binding_config_object").on(table.configObjectId),
  ],
)
// Tombstone for a connector-sourced config object deleted at the external
// source: retains the former object id, the sync event that deleted it, and
// the source revision so re-ingestion can tell "new file" from "restored".
export const ConnectorSourceTombstoneTable = mysqlTable(
  "connector_source_tombstone",
  {
    id: denTypeIdColumn("connectorSourceTombstone", "id").notNull().primaryKey(),
    organizationId: denTypeIdColumn("organization", "organization_id").notNull(),
    connectorInstanceId: denTypeIdColumn("connectorInstance", "connector_instance_id").notNull(),
    connectorTargetId: denTypeIdColumn("connectorTarget", "connector_target_id").notNull(),
    connectorMappingId: denTypeIdColumn("connectorMapping", "connector_mapping_id").notNull(),
    connectorType: mysqlEnum("connector_type", connectorTypeValues).notNull(),
    remoteId: varchar("remote_id", { length: 255 }),
    // Length 2048 matches the generated migration (`external_locator`
    // varchar(2048)) and the binding table; the previous 255 here disagreed
    // with the DDL.
    externalLocator: varchar("external_locator", { length: 2048 }).notNull(),
    formerConfigObjectId: denTypeIdColumn("configObject", "former_config_object_id").notNull(),
    deletedInSyncEventId: denTypeIdColumn("connectorSyncEvent", "deleted_in_sync_event_id").notNull(),
    deletedSourceRevisionRef: varchar("deleted_source_revision_ref", { length: 255 }),
    createdAt: timestamp("created_at", { fsp: 3 }).notNull().defaultNow(),
  },
  (table) => [
    index("connector_source_tombstone_organization_id").on(table.organizationId),
    index("connector_source_tombstone_connector_instance_id").on(table.connectorInstanceId),
    index("connector_source_tombstone_connector_target_id").on(table.connectorTargetId),
    index("connector_source_tombstone_connector_mapping_id").on(table.connectorMappingId),
    index("connector_source_tombstone_external_locator").on(table.externalLocator),
    index("connector_source_tombstone_former_config_object_id").on(table.formerConfigObjectId),
  ],
)
// Relational wiring: a config object belongs to an org, may come from a
// connector instance, and fans out to versions, grants, plugin memberships,
// and source bindings.
export const configObjectRelations = relations(ConfigObjectTable, ({ many, one }) => ({
accessGrants: many(ConfigObjectAccessGrantTable),
connectorInstance: one(ConnectorInstanceTable, {
fields: [ConfigObjectTable.connectorInstanceId],
references: [ConnectorInstanceTable.id],
}),
createdByOrgMembership: one(MemberTable, {
fields: [ConfigObjectTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
memberships: many(PluginConfigObjectTable),
organization: one(OrganizationTable, {
fields: [ConfigObjectTable.organizationId],
references: [OrganizationTable.id],
}),
sourceBindings: many(ConnectorSourceBindingTable),
versions: many(ConfigObjectVersionTable),
}))
// A version points back to its object, its optional creating member, and the
// sync event that produced it (when connector-created).
export const configObjectVersionRelations = relations(ConfigObjectVersionTable, ({ one }) => ({
configObject: one(ConfigObjectTable, {
fields: [ConfigObjectVersionTable.configObjectId],
references: [ConfigObjectTable.id],
}),
connectorSyncEvent: one(ConnectorSyncEventTable, {
fields: [ConfigObjectVersionTable.connectorSyncEventId],
references: [ConnectorSyncEventTable.id],
}),
createdByOrgMembership: one(MemberTable, {
fields: [ConfigObjectVersionTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
}))
// A plugin has grants, memberships, connector mappings targeting it, and an
// owning org/creator.
export const pluginRelations = relations(PluginTable, ({ many, one }) => ({
accessGrants: many(PluginAccessGrantTable),
createdByOrgMembership: one(MemberTable, {
fields: [PluginTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
memberships: many(PluginConfigObjectTable),
organization: one(OrganizationTable, {
fields: [PluginTable.organizationId],
references: [OrganizationTable.id],
}),
mappings: many(ConnectorMappingTable),
}))
// Join-row wiring: plugin, object, optional mapping that auto-added it, and
// the member who added it manually.
export const pluginConfigObjectRelations = relations(PluginConfigObjectTable, ({ one }) => ({
configObject: one(ConfigObjectTable, {
fields: [PluginConfigObjectTable.configObjectId],
references: [ConfigObjectTable.id],
}),
connectorMapping: one(ConnectorMappingTable, {
fields: [PluginConfigObjectTable.connectorMappingId],
references: [ConnectorMappingTable.id],
}),
createdByOrgMembership: one(MemberTable, {
fields: [PluginConfigObjectTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
plugin: one(PluginTable, {
fields: [PluginConfigObjectTable.pluginId],
references: [PluginTable.id],
}),
}))
// Grant wiring: the granted object plus the member/team principal and grantor.
export const configObjectAccessGrantRelations = relations(ConfigObjectAccessGrantTable, ({ one }) => ({
configObject: one(ConfigObjectTable, {
fields: [ConfigObjectAccessGrantTable.configObjectId],
references: [ConfigObjectTable.id],
}),
createdByOrgMembership: one(MemberTable, {
fields: [ConfigObjectAccessGrantTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
orgMembership: one(MemberTable, {
fields: [ConfigObjectAccessGrantTable.orgMembershipId],
references: [MemberTable.id],
}),
team: one(TeamTable, {
fields: [ConfigObjectAccessGrantTable.teamId],
references: [TeamTable.id],
}),
}))
// Grant wiring: the granted plugin plus the member/team principal and grantor.
export const pluginAccessGrantRelations = relations(PluginAccessGrantTable, ({ one }) => ({
createdByOrgMembership: one(MemberTable, {
fields: [PluginAccessGrantTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
orgMembership: one(MemberTable, {
fields: [PluginAccessGrantTable.orgMembershipId],
references: [MemberTable.id],
}),
plugin: one(PluginTable, {
fields: [PluginAccessGrantTable.pluginId],
references: [PluginTable.id],
}),
team: one(TeamTable, {
fields: [PluginAccessGrantTable.teamId],
references: [TeamTable.id],
}),
}))
// An account belongs to an org/creator and owns its connector instances.
export const connectorAccountRelations = relations(ConnectorAccountTable, ({ many, one }) => ({
createdByOrgMembership: one(MemberTable, {
fields: [ConnectorAccountTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
instances: many(ConnectorInstanceTable),
organization: one(OrganizationTable, {
fields: [ConnectorAccountTable.organizationId],
references: [OrganizationTable.id],
}),
}))
// Hub of the connector domain: an instance owns targets, mappings, sync
// events, bindings, tombstones, ingested config objects, and grants.
export const connectorInstanceRelations = relations(ConnectorInstanceTable, ({ many, one }) => ({
accessGrants: many(ConnectorInstanceAccessGrantTable),
account: one(ConnectorAccountTable, {
fields: [ConnectorInstanceTable.connectorAccountId],
references: [ConnectorAccountTable.id],
}),
configObjects: many(ConfigObjectTable),
createdByOrgMembership: one(MemberTable, {
fields: [ConnectorInstanceTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
mappings: many(ConnectorMappingTable),
organization: one(OrganizationTable, {
fields: [ConnectorInstanceTable.organizationId],
references: [OrganizationTable.id],
}),
sourceBindings: many(ConnectorSourceBindingTable),
syncEvents: many(ConnectorSyncEventTable),
targets: many(ConnectorTargetTable),
tombstones: many(ConnectorSourceTombstoneTable),
}))
// Grant wiring: the granted instance plus the member/team principal and grantor.
export const connectorInstanceAccessGrantRelations = relations(ConnectorInstanceAccessGrantTable, ({ one }) => ({
connectorInstance: one(ConnectorInstanceTable, {
fields: [ConnectorInstanceAccessGrantTable.connectorInstanceId],
references: [ConnectorInstanceTable.id],
}),
createdByOrgMembership: one(MemberTable, {
fields: [ConnectorInstanceAccessGrantTable.createdByOrgMembershipId],
references: [MemberTable.id],
}),
orgMembership: one(MemberTable, {
fields: [ConnectorInstanceAccessGrantTable.orgMembershipId],
references: [MemberTable.id],
}),
team: one(TeamTable, {
fields: [ConnectorInstanceAccessGrantTable.teamId],
references: [TeamTable.id],
}),
}))
// A target belongs to its instance and owns mappings, bindings, sync events,
// and tombstones scoped to it.
export const connectorTargetRelations = relations(ConnectorTargetTable, ({ many, one }) => ({
connectorInstance: one(ConnectorInstanceTable, {
fields: [ConnectorTargetTable.connectorInstanceId],
references: [ConnectorInstanceTable.id],
}),
mappings: many(ConnectorMappingTable),
sourceBindings: many(ConnectorSourceBindingTable),
syncEvents: many(ConnectorSyncEventTable),
tombstones: many(ConnectorSourceTombstoneTable),
}))
// A mapping belongs to its instance/target, optionally feeds a plugin, and
// owns the bindings/tombstones/memberships it produced.
export const connectorMappingRelations = relations(ConnectorMappingTable, ({ many, one }) => ({
connectorInstance: one(ConnectorInstanceTable, {
fields: [ConnectorMappingTable.connectorInstanceId],
references: [ConnectorInstanceTable.id],
}),
connectorTarget: one(ConnectorTargetTable, {
fields: [ConnectorMappingTable.connectorTargetId],
references: [ConnectorTargetTable.id],
}),
plugin: one(PluginTable, {
fields: [ConnectorMappingTable.pluginId],
references: [PluginTable.id],
}),
pluginMemberships: many(PluginConfigObjectTable),
sourceBindings: many(ConnectorSourceBindingTable),
tombstones: many(ConnectorSourceTombstoneTable),
}))
// A sync event belongs to an instance (and optionally a target) and owns the
// versions and tombstones it produced.
export const connectorSyncEventRelations = relations(ConnectorSyncEventTable, ({ many, one }) => ({
connectorInstance: one(ConnectorInstanceTable, {
fields: [ConnectorSyncEventTable.connectorInstanceId],
references: [ConnectorInstanceTable.id],
}),
connectorTarget: one(ConnectorTargetTable, {
fields: [ConnectorSyncEventTable.connectorTargetId],
references: [ConnectorTargetTable.id],
}),
tombstones: many(ConnectorSourceTombstoneTable),
versions: many(ConfigObjectVersionTable),
}))
// A binding links one config object to the instance/target/mapping it came from.
export const connectorSourceBindingRelations = relations(ConnectorSourceBindingTable, ({ one }) => ({
configObject: one(ConfigObjectTable, {
fields: [ConnectorSourceBindingTable.configObjectId],
references: [ConfigObjectTable.id],
}),
connectorInstance: one(ConnectorInstanceTable, {
fields: [ConnectorSourceBindingTable.connectorInstanceId],
references: [ConnectorInstanceTable.id],
}),
connectorMapping: one(ConnectorMappingTable, {
fields: [ConnectorSourceBindingTable.connectorMappingId],
references: [ConnectorMappingTable.id],
}),
connectorTarget: one(ConnectorTargetTable, {
fields: [ConnectorSourceBindingTable.connectorTargetId],
references: [ConnectorTargetTable.id],
}),
}))
// A tombstone links back to its connector scope, the deleted object it
// replaces, and the sync event that deleted it.
export const connectorSourceTombstoneRelations = relations(ConnectorSourceTombstoneTable, ({ one }) => ({
connectorInstance: one(ConnectorInstanceTable, {
fields: [ConnectorSourceTombstoneTable.connectorInstanceId],
references: [ConnectorInstanceTable.id],
}),
connectorMapping: one(ConnectorMappingTable, {
fields: [ConnectorSourceTombstoneTable.connectorMappingId],
references: [ConnectorMappingTable.id],
}),
connectorTarget: one(ConnectorTargetTable, {
fields: [ConnectorSourceTombstoneTable.connectorTargetId],
references: [ConnectorTargetTable.id],
}),
deletedInSyncEvent: one(ConnectorSyncEventTable, {
fields: [ConnectorSourceTombstoneTable.deletedInSyncEventId],
references: [ConnectorSyncEventTable.id],
}),
formerConfigObject: one(ConfigObjectTable, {
fields: [ConnectorSourceTombstoneTable.formerConfigObjectId],
references: [ConfigObjectTable.id],
}),
}))
// camelCase aliases of the PascalCase table exports — presumably so the
// drizzle relational query API can address tables by these keys; confirm
// against how the schema object is assembled.
export const configObject = ConfigObjectTable
export const configObjectVersion = ConfigObjectVersionTable
export const plugin = PluginTable
export const pluginConfigObject = PluginConfigObjectTable
export const configObjectAccessGrant = ConfigObjectAccessGrantTable
export const pluginAccessGrant = PluginAccessGrantTable
export const connectorAccount = ConnectorAccountTable
export const connectorInstance = ConnectorInstanceTable
export const connectorInstanceAccessGrant = ConnectorInstanceAccessGrantTable
export const connectorTarget = ConnectorTargetTable
export const connectorMapping = ConnectorMappingTable
export const connectorSyncEvent = ConnectorSyncEventTable
export const connectorSourceBinding = ConnectorSourceBindingTable
export const connectorSourceTombstone = ConnectorSourceTombstoneTable

View File

@@ -26,6 +26,20 @@ export const idTypesMapNameToPrefix = {
skillHub: "shb",
skillHubSkill: "shs",
skillHubMember: "shm",
configObject: "cob",
configObjectVersion: "cov",
configObjectAccessGrant: "coa",
plugin: "plg",
pluginConfigObject: "pco",
pluginAccessGrant: "pag",
connectorAccount: "cac",
connectorInstance: "cin",
connectorInstanceAccessGrant: "cia",
connectorTarget: "ctg",
connectorMapping: "cmp",
connectorSyncEvent: "cse",
connectorSourceBinding: "csb",
connectorSourceTombstone: "cst",
llmProvider: "lpr",
llmProviderModel: "lpm",
llmProviderAccess: "lpa",

View File

@@ -0,0 +1,773 @@
# GitHub Connector
This document describes how the GitHub connector should work for the new plugin architecture.
## Goal
Let an organization use a GitHub repo as a source of truth for config objects and plugins.
The GitHub connector should:
- connect through a GitHub App;
- let admins choose a repo and branch;
- let admins map repo paths to config object types and optional plugins;
- ingest matching files into OpenWork;
- keep OpenWork in sync when GitHub sends webhook events.
## Core model
The recommended model is:
- GitHub is an external source;
- OpenWork stores a connector account, connector instance, target repo, and path mappings;
- GitHub webhooks notify OpenWork that the selected branch changed;
- OpenWork then reconciles against the current branch head state;
- OpenWork does not treat individual changed files in the webhook payload as the final source of truth.
That means the primary sync model is:
- branch-head reconciliation, not event-by-event mutation replay.
## Why branch-head reconciliation is the right model
When a commit lands on the selected branch, the safest thing to do is:
1. determine the new head commit for the tracked branch;
2. read the current repo state at that commit;
3. evaluate all configured mappings against that state;
4. create, update, or tombstone config objects accordingly.
This is better than replaying per-file webhook changes because it:
- makes merges and squash merges behave the same way as regular pushes;
- avoids drift if webhook deliveries arrive out of order;
- makes retries idempotent;
- lets us recover from partial ingestion failures by re-running the same reconciliation at the same commit;
- treats GitHub branch state as the authoritative source, not webhook payload details.
The webhook tells us that something changed.
The repo head tells us what is now true.
## Main components
### GitHub App
We create and operate a GitHub App.
Users:
- install the app on their GitHub org or selected repos;
- authorize OpenWork to see which installations/repos they can use;
- choose one installed repo during connector setup.
The GitHub App gives us:
- installation identity;
- repo access;
- webhook delivery from GitHub;
- installation tokens for API access.
### OpenWork connector records
The GitHub connector should fit the generic connector model already documented in `prds/new-plugin-arch/datastructure.md`.
Relevant records:
- `connector_account`
- represents the GitHub App installation / account binding;
- `connector_instance`
- represents one configured use of GitHub inside an org;
- `connector_target`
- represents a specific repo + branch target;
- `connector_mapping`
- maps paths in that repo to config object types and optional plugin auto-membership;
- `connector_sync_event`
- records each webhook-triggered or manual sync run;
- `connector_source_binding`
- links an ingested config object to its GitHub source path;
- `connector_source_tombstone`
- preserves deleted path history.
### Flexible connector ids
The shared connector model should always keep:
- a local OpenWork `id`
- a connector `type`
- a connector-native `remote_id`
Current GitHub mapping:
- `type`: `github`
- target `remote_id`: `org/repo`
Recommended GitHub-specific examples:
- `connector_account.remote_id`
- GitHub installation id, or installation-scoped account key if we need a string form;
- `connector_target.remote_id`
- `org/repo`;
- `connector_source_binding.remote_id`
- GitHub blob/file identifier if useful, otherwise nullable and path-based identity is enough.
This keeps the schema flexible for other connectors while still preserving GitHub-specific identifiers.
## Setup flow
### 1. Install GitHub App
Admin installs the GitHub App into their GitHub org or user account.
OpenWork stores:
- GitHub installation id;
- GitHub account/org identity;
- available repos for that installation.
This becomes the `connector_account`.
Recommended stored fields:
- `connector_type = github`
- `remote_id = <github_installation_id>`
- installation account login/name
- installation account type (`Organization` or `User`)
### 2. Create connector instance
Inside OpenWork, an admin creates a GitHub connector instance.
They choose:
- installation/account;
- repo;
- branch;
- optional name for this connector instance.
This becomes:
- `connector_instance`
- one `connector_target` for repo + branch.
Recommended stored target fields:
- `connector_target.connector_type = github`
- `connector_target.remote_id = org/repo`
- repo numeric id
- repo owner login
- repo name
- branch name / full ref
- default branch at time of setup if useful for validation
### 3. Create repo mappings
Admin configures one or more mappings from repo paths to config object types.
Examples:
- `/sales/skills/**` -> `skill` -> plugin A
- `/sales/agents/**` -> `agent` -> plugin A
- `/finance/commands/**` -> `command` -> plugin B
- `/shared/mcps/**` -> `mcp` -> no auto-plugin
Each mapping may include:
- path selector/glob;
- target config object type;
- parser mode if needed;
- plugin id if auto-adding to a plugin;
- `auto_add_to_plugin` flag.
This becomes `connector_mapping`.
### 4. Initial full sync
After setup, OpenWork should run an initial full reconciliation against the selected branch head.
This seeds:
- config objects;
- config object versions;
- plugin memberships;
- source bindings.
## Webhook model
### Endpoint shape
GitHub Apps support a single webhook URL per app registration.
So the recommended shape is:
- one public GitHub ingress endpoint;
- internal routing to event-specific handlers.
Recommended public endpoint:
- `POST /api/webhooks/connectors/github`
Recommended internal handler split:
- `githubWebhookIngress()`
- receives the raw HTTP request
- verifies the signature
- parses headers and payload
- dispatches by event type
- `handleGithubPushEvent()`
- `handleGithubInstallationEvent()`
- `handleGithubInstallationRepositoriesEvent()`
- `handleGithubRepositoryEvent()` optional later
If we want subpath-style organization inside the app, we can still do that after ingress.
Example internal structure:
- public ingress: `POST /api/webhooks/connectors/github`
- internal modules:
- `webhooks/connectors/github/push`
- `webhooks/connectors/github/installation`
- `webhooks/connectors/github/installation-repositories`
Important constraint:
- GitHub itself should send to one externally registered webhook URL;
- event-specific subpaths are best treated as internal server organization, not multiple GitHub-facing URLs.
### Events we care about
For v1, the main event should be GitHub `push` webhook deliveries.
Why:
- a merge to the tracked branch produces a push event;
- a direct commit to the tracked branch also produces a push event;
- the push event gives us the repo, branch ref, and head commit.
So the practical rule is:
- ignore webhook events that do not change the selected branch;
- enqueue reconciliation when a push hits the selected branch.
We may also care about some non-content events for connector health, but not for ingestion truth:
- installation removed;
- repo access removed;
- repo renamed or archived.
Those should update connector state, but they should not replace branch-head content sync.
Current implementation note:
- `installation.deleted` updates matching `connector_account` rows to `disconnected` and does not enqueue a content sync job.
### Events we can ignore for ingestion
For config ingestion, we should ignore or de-prioritize:
- pushes to other branches;
- pull request open/update events;
- issue events;
- comment events;
- check runs;
- release events.
We do not need PR merge events separately if push-to-branch is our source trigger.
## What happens on webhook
### Recommended flow
When GitHub sends a webhook:
1. verify the GitHub webhook signature before doing anything else;
2. identify the GitHub installation and repo;
3. find matching `connector_target` rows;
4. ignore any target where the webhook ref does not equal the configured branch ref;
5. create a `connector_sync_event` in `pending` or `running` state;
6. enqueue a reconciliation job keyed by connector target + head commit;
7. return success to GitHub quickly.
Important:
- signature verification is mandatory, not optional;
- webhook handling should be lightweight;
- actual ingestion should happen asynchronously in a job worker.
### Signature verification requirements
OpenWork must verify the GitHub App webhook secret on every incoming webhook delivery.
Requirements:
- store the GitHub App webhook secret securely on the server side;
- validate the signature header from GitHub against the raw request body before JSON parsing or event processing;
- reject the request if the signature is missing, invalid, or computed from a body that does not match the raw bytes received;
- use a constant-time comparison when checking the computed signature;
- log verification failures at a security/ops level, but do not process the event.
Practical rule:
- no signature match, no webhook processing.
Additional hardening:
- record the GitHub delivery id for traceability;
- make delivery handling idempotent so safe retries are possible;
- optionally track duplicate delivery ids to reduce redundant work.
### Verification implementation shape
The signature check should happen in the public ingress endpoint before event dispatch.
Recommended flow:
1. read the raw request body bytes exactly as received;
2. read GitHub headers:
- `X-Hub-Signature-256`
- `X-GitHub-Event`
- `X-GitHub-Delivery`
3. compute HMAC SHA-256 over the raw body using the GitHub App webhook secret;
4. compare the computed digest with `X-Hub-Signature-256` using constant-time comparison;
5. reject the request if verification fails;
6. only then parse JSON and dispatch by event type.
Key implementation rule:
- the signature must be computed from the raw body, not from re-serialized JSON.
Recommended pseudocode shape:
```ts
async function githubWebhookIngress(req: Request) {
const rawBody = await req.text()
const signature = req.headers.get("x-hub-signature-256")
if (!signature) return new Response("missing signature", { status: 401 })
const expected = signGithubBody(rawBody, env.GITHUB_CONNECTOR_APP_WEBHOOK_SECRET)
if (!timingSafeEqual(signature, expected)) {
return new Response("invalid signature", { status: 401 })
}
const event = req.headers.get("x-github-event")
const deliveryId = req.headers.get("x-github-delivery")
const payload = JSON.parse(rawBody)
return dispatchGithubWebhook({ event, deliveryId, payload })
}
```
Recommended helper responsibilities:
- `signGithubBody(rawBody, secret)`
- returns `sha256=<digest>`
- `timingSafeEqual(a, b)`
- prevents naive string comparison timing leaks
- `dispatchGithubWebhook()`
- routes to event-specific handlers
### Dispatch model
After signature verification, the ingress should dispatch by event type.
Recommended event routing:
- `push` -> `handleGithubPushEvent`
- `installation` -> `handleGithubInstallationEvent`
- `installation_repositories` -> `handleGithubInstallationRepositoriesEvent`
- `repository` -> `handleGithubRepositoryEvent` optional later
- everything else -> acknowledge and ignore
That gives us:
- one secure ingress path;
- explicit event-specific logic;
- easy expansion later without changing the GitHub App registration URL.
For GitHub, useful preserved webhook fields include:
- `X-GitHub-Delivery`
- installation id
- repository id
- repository full name
- ref
- after SHA / head SHA
### API shape
This is the recommended API contract shape around the webhook ingress and async sync pipeline.
#### Public webhook ingress
Endpoint:
- `POST /api/webhooks/connectors/github`
Input:
- raw GitHub webhook request body
- GitHub headers including:
- `X-Hub-Signature-256`
- `X-GitHub-Event`
- `X-GitHub-Delivery`
Behavior:
1. verify signature against raw body
2. reject with `401` if invalid or missing
3. parse event metadata
4. if event is irrelevant, acknowledge and return success
5. if event is relevant, create or update a `connector_sync_event`
6. enqueue async reconciliation keyed by connector target and head SHA
7. return quickly without doing full ingestion inline
Recommended responses:
- `401 Unauthorized`
- signature missing or invalid
- `202 Accepted`
- valid event accepted for async processing
- `200 OK`
- valid but intentionally ignored event
Example response shape for accepted events:
```json
{
"ok": true,
"accepted": true,
"event": "push",
"deliveryId": "<github-delivery-id>",
"queued": true
}
```
Example response shape for ignored events:
```json
{
"ok": true,
"accepted": false,
"reason": "event ignored"
}
```
Example response shape for invalid signature:
```json
{
"ok": false,
"error": "invalid signature"
}
```
#### Internal webhook dispatch contract
Recommended normalized dispatch input:
```ts
type GithubWebhookEnvelope = {
deliveryId: string
event: string
installationId?: number
repositoryId?: number
repositoryFullName?: string
ref?: string
headSha?: string
payload: unknown
}
```
The ingress should build this envelope once, then hand it to event-specific handlers.
#### Internal sync enqueue contract
For relevant push events on a tracked branch, enqueue an internal sync job.
Recommended job payload shape:
```ts
type GithubConnectorSyncJob = {
connectorType: "github"
connectorInstanceId: string
connectorTargetId: string
connectorSyncEventId: string
deliveryId: string
installationId: number
repositoryId: number
repositoryFullName: string
ref: string
headSha: string
}
```
Important:
- dedupe jobs on `connectorTargetId + headSha`
- preserve `deliveryId` for observability
- do not require webhook redelivery to retry; the job should be rerunnable internally
#### Internal sync trigger behavior
The enqueue layer should:
- resolve matching connector targets by `connector_type`, repo identity, and branch ref
- create one logical sync event per target + head SHA
- avoid double-enqueuing the same target/head combination
- mark the sync event as queued/running before worker execution
#### Worker result contract
The reconciliation worker should update the corresponding `connector_sync_event` with at least:
- final status (`completed`, `failed`, `partial`)
- target id
- head SHA
- counts for created / updated / deleted / skipped objects
- per-file parse failures if any
- completed timestamp
This does not need to be a public API response, but it should be the internal result shape we can rely on for UI and debugging.
## Reconciliation job
### Input
The reconciliation job should take:
- `connector_instance_id`
- `connector_target_id`
- repo owner/name
- branch ref
- head commit SHA from the webhook
### Step 1: fetch current repo state
OpenWork should fetch repo state at the selected branch head commit.
Recommended rule:
- treat the head commit tree as the truth;
- do not rely solely on the changed-file list in the webhook.
Implementation choices:
- use GitHub contents/tree APIs for mapped paths;
- or fetch an archive / git tree snapshot for the relevant branch;
- or diff previous ingested SHA vs new SHA as an optimization later.
For v1, the clearest behavior is:
- enumerate all files matching the configured mappings at the new branch head.
### Step 2: resolve applicable files
For each `connector_mapping`:
1. list files under the mapped selector;
2. filter to files that are valid for that config type;
3. parse the file into the normalized config type shape;
4. build the desired-state set for that mapping.
This gives us the desired current set of GitHub-backed config objects for that target and commit.
### Step 3: compare desired state to current bindings
For each mapping, compare:
- desired files at branch head
vs
- active `connector_source_binding` rows for that mapping.
Then:
- file exists and binding exists -> update existing object with a new version if content changed;
- file exists and no binding exists -> create a new object and binding;
- binding exists and file no longer exists -> tombstone the binding and mark the object deleted/inactive;
- file reappears at a previously deleted path -> create a new object identity, do not revive the tombstoned one.
### Step 4: create or update config objects
For each live file:
1. parse raw source according to config type;
2. create a new `config_object_version` if content or relevant parsed state changed;
3. project current searchable metadata onto `config_object`;
4. update `connector_source_binding.last_seen_source_revision_ref`;
5. ensure plugin membership exists if the mapping auto-adds to a plugin.
### Step 5: handle deletions
For files no longer present at the branch head:
1. create a deleted version or otherwise mark the object deleted/inactive;
2. close the active `connector_source_binding`;
3. insert `connector_source_tombstone` with the deleted path and prior object id;
4. keep plugin membership history intact, but exclude deleted objects from active delivery.
## Plugin auto-membership behavior
If a mapping is bound to a plugin and `auto_add_to_plugin = true`:
- newly discovered files create config objects and are automatically added to that plugin;
- updated files stay in the plugin;
- deleted files remain historically associated but are not active downloadable members.
If a mapping has no plugin binding:
- config objects are still ingested and managed by the connector;
- plugin membership can be added manually later through the API/UI.
## Current recommendation on diffs vs full scan
The recommendation is:
- after a qualifying webhook, read the repo state at the branch head and ingest from that state.
More precisely:
- we should reconcile against the selected branch head, not trust file diffs alone.
The changed-file list from GitHub push events can be useful later as an optimization, but it should not be the authoritative ingestion algorithm for v1.
## Idempotency and retries
The sync worker should be idempotent.
That means:
- same connector target + same head SHA should be safe to process more than once;
- if a previous attempt failed halfway through, we should be able to rerun it;
- duplicate webhook deliveries should not create duplicate objects or bindings.
Good guardrails:
- dedupe jobs on `connector_target_id + head_sha`;
- store sync event status transitions;
- skip creating a new version if parsed content did not materially change.
## Failure handling
If some files fail to parse:
- do not fail the entire connector target unless the repo itself could not be read;
- record per-file failures in sync metadata;
- leave previously successful objects intact;
- mark the affected object or sync event with an ingestion error state.
Admins should be able to see:
- last successful sync time;
- last attempted commit SHA;
- parse failures by file path;
- whether the connector target is currently healthy.
## Security and permissions
### GitHub side
We should request the smallest practical GitHub App permissions needed for branch-head reconciliation.
Recommended repository permissions:
- `Contents: Read-only`
- required to read files, trees, and branch-head content for mapped paths;
- `Metadata: Read-only`
- required for basic repository identity and repo metadata;
Recommended account/install scope:
- installable on organizations and optionally user accounts if we want both use cases;
- repo access should preferably be selected repos, not all repos, unless the user explicitly chooses broader scope.
Recommended webhook subscriptions:
- `push`
- primary content-ingestion trigger for the selected branch;
- `installation`
- detect app uninstalls or installation-level lifecycle changes;
- `installation_repositories`
- detect when repo access is added or removed from the installation.
Optional later webhook subscriptions if product needs them:
- `repository`
- useful for rename/archive/default-branch changes if we want explicit lifecycle updates.
Permissions we should avoid unless later requirements demand them:
- write permissions on repository contents;
- issues, pull requests, actions, deployments, or admin permissions;
- any org/user permissions unrelated to connector setup and repo reading.
Operational note:
- installation tokens are generated from the GitHub App installation and are not themselves a separate permission choice, but the app/server must securely mint and use them only when reading the configured repo state.
### OpenWork side
Separate RBAC should govern:
- who can connect a GitHub installation;
- who can create connector instances;
- who can edit mappings;
- who can bind mappings to plugins;
- who can manually edit ingested objects after sync.
## State we should preserve
For every ingested GitHub-backed object, we should preserve:
- installation id;
- GitHub account/org login;
- repository id;
- repo owner/name;
- branch;
- mapping id;
- connector target `remote_id` (`org/repo`);
- source path;
- file name and extension;
- last seen commit SHA;
- sync event history;
- tombstone history for deleted paths.
## Suggested lifecycle summary
### On setup
1. install GitHub App
2. create connector account
3. create connector instance and target repo/branch
4. create mappings
5. run initial full reconciliation
### On qualifying push webhook
1. verify event
2. check selected branch match
3. enqueue reconciliation
4. fetch current branch-head state
5. evaluate mappings
6. create/update/delete/tombstone config objects
7. update plugin memberships
8. mark sync event complete
## Recommendation summary
The GitHub connector should work like this:
- GitHub App installation gives OpenWork repo access and webhooks;
- admins map repo paths on a selected branch to config object types and plugins;
- GitHub push events on the selected branch trigger async reconciliation;
- OpenWork reads the current branch-head repo state and ingests from that state;
- OpenWork compares desired current files against existing bindings to create, update, delete, and tombstone objects;
- plugin auto-membership is driven by connector mappings.
That keeps the system deterministic, retryable, and aligned with the one-source-of-truth rule.
## Open questions
- Should v1 read repo state through tree APIs, archive downloads, or a shallow git mirror worker?
- Should we ingest only mapped paths, or fetch the whole tree and filter locally?
- Do we want manual "resync now" controls per connector target?
- Do we want to expose last synced commit SHA in the admin UI?

View File

@@ -0,0 +1,307 @@
# New Plugin Arch Admin API
This document describes the authenticated admin/API-consumer surface for managing the new plugin architecture.
Use this for:
- OpenWork admin UI
- direct authenticated API clients
- backend route planning
Base prefix:
- `/v1/orgs/:orgId/...`
## Principles
- expose logical resources, not raw tables;
- return current projections for current-state list/detail endpoints;
- keep version history endpoints explicit;
- keep connector sync async and observable;
- keep type-specific convenience endpoints optional but available where UI clarity benefits.
Current implementation note:
- until dedicated org-capability persistence exists, create/manage-account style admin actions are gated by org owner/admin membership in the endpoint layer.
## Config objects
### List/search config objects
- `GET /v1/orgs/:orgId/config-objects`
Suggested query params:
- `type`
- `status`
- `sourceMode`
- `pluginId`
- `connectorInstanceId`
- `q`
- `limit`
- `cursor`
- `includeDeleted`
Returns one row per config object, not one row per version.
### Get one config object
- `GET /v1/orgs/:orgId/config-objects/:configObjectId`
### Create config object
- `POST /v1/orgs/:orgId/config-objects`
Suggested body shape:
```json
{
"type": "skill",
"sourceMode": "cloud",
"pluginIds": ["plugin_123"],
"input": {
"rawSourceText": "...",
"parserMode": "opencode"
}
}
```
### Create a new version for an object
- `POST /v1/orgs/:orgId/config-objects/:configObjectId/versions`
Suggested body shape:
```json
{
"input": {
"rawSourceText": "..."
},
"reason": "manual edit"
}
```
### Lifecycle endpoints
- `POST /v1/orgs/:orgId/config-objects/:configObjectId/archive`
- `POST /v1/orgs/:orgId/config-objects/:configObjectId/delete`
- `POST /v1/orgs/:orgId/config-objects/:configObjectId/restore`
### Object/plugin membership endpoints
- `GET /v1/orgs/:orgId/config-objects/:configObjectId/plugins`
- `POST /v1/orgs/:orgId/config-objects/:configObjectId/plugins`
- `DELETE /v1/orgs/:orgId/config-objects/:configObjectId/plugins/:pluginId`
### Object access endpoints
- `GET /v1/orgs/:orgId/config-objects/:configObjectId/access`
- `POST /v1/orgs/:orgId/config-objects/:configObjectId/access`
- `DELETE /v1/orgs/:orgId/config-objects/:configObjectId/access/:grantId`
## Config object versions
- `GET /v1/orgs/:orgId/config-objects/:configObjectId/versions`
- `GET /v1/orgs/:orgId/config-objects/:configObjectId/versions/:versionId`
- `GET /v1/orgs/:orgId/config-objects/:configObjectId/versions/latest`
- `GET /v1/orgs/:orgId/config-objects/:configObjectId/versions/compare?from=:versionA&to=:versionB`
## Type-specific convenience endpoints
These should sit on top of the shared config-object model.
### Skills
- `GET /v1/orgs/:orgId/skills`
- `POST /v1/orgs/:orgId/skills`
- `GET /v1/orgs/:orgId/skills/:configObjectId`
- `POST /v1/orgs/:orgId/skills/:configObjectId/versions`
- `POST /v1/orgs/:orgId/skills/validate`
- `POST /v1/orgs/:orgId/skills/preview`
### Agents
- `GET /v1/orgs/:orgId/agents`
- `POST /v1/orgs/:orgId/agents`
- `GET /v1/orgs/:orgId/agents/:configObjectId`
- `POST /v1/orgs/:orgId/agents/:configObjectId/versions`
- `POST /v1/orgs/:orgId/agents/validate`
### Commands
- `GET /v1/orgs/:orgId/commands`
- `POST /v1/orgs/:orgId/commands`
- `GET /v1/orgs/:orgId/commands/:configObjectId`
- `POST /v1/orgs/:orgId/commands/:configObjectId/versions`
- `POST /v1/orgs/:orgId/commands/validate`
- `POST /v1/orgs/:orgId/commands/render-preview`
### Tools
- `GET /v1/orgs/:orgId/tools`
- `POST /v1/orgs/:orgId/tools`
- `GET /v1/orgs/:orgId/tools/:configObjectId`
- `POST /v1/orgs/:orgId/tools/:configObjectId/versions`
- `POST /v1/orgs/:orgId/tools/analyze`
- `POST /v1/orgs/:orgId/tools/validate`
### MCPs
- `GET /v1/orgs/:orgId/mcps`
- `POST /v1/orgs/:orgId/mcps`
- `GET /v1/orgs/:orgId/mcps/:configObjectId`
- `POST /v1/orgs/:orgId/mcps/:configObjectId/versions`
- `POST /v1/orgs/:orgId/mcps/validate`
- `POST /v1/orgs/:orgId/mcps/:configObjectId/test-connection`
- `POST /v1/orgs/:orgId/mcps/:configObjectId/install-check`
## Plugins
- `GET /v1/orgs/:orgId/plugins`
- `GET /v1/orgs/:orgId/plugins/:pluginId`
- `POST /v1/orgs/:orgId/plugins`
- `PATCH /v1/orgs/:orgId/plugins/:pluginId`
- `POST /v1/orgs/:orgId/plugins/:pluginId/archive`
- `POST /v1/orgs/:orgId/plugins/:pluginId/restore`
### Plugin membership endpoints
- `GET /v1/orgs/:orgId/plugins/:pluginId/config-objects`
- `POST /v1/orgs/:orgId/plugins/:pluginId/config-objects`
- `DELETE /v1/orgs/:orgId/plugins/:pluginId/config-objects/:configObjectId`
- `GET /v1/orgs/:orgId/plugins/:pluginId/resolved`
### Optional plugin release endpoints
- `GET /v1/orgs/:orgId/plugins/:pluginId/releases`
- `POST /v1/orgs/:orgId/plugins/:pluginId/releases`
- `GET /v1/orgs/:orgId/plugins/:pluginId/releases/:releaseId`
### Plugin access endpoints
- `GET /v1/orgs/:orgId/plugins/:pluginId/access`
- `POST /v1/orgs/:orgId/plugins/:pluginId/access`
- `DELETE /v1/orgs/:orgId/plugins/:pluginId/access/:grantId`
## Connector accounts
- `GET /v1/orgs/:orgId/connector-accounts`
- `POST /v1/orgs/:orgId/connector-accounts`
- `GET /v1/orgs/:orgId/connector-accounts/:connectorAccountId`
- `POST /v1/orgs/:orgId/connector-accounts/:connectorAccountId/disconnect`
## Connector instances
- `GET /v1/orgs/:orgId/connector-instances`
- `POST /v1/orgs/:orgId/connector-instances`
- `GET /v1/orgs/:orgId/connector-instances/:connectorInstanceId`
- `PATCH /v1/orgs/:orgId/connector-instances/:connectorInstanceId`
- `POST /v1/orgs/:orgId/connector-instances/:connectorInstanceId/archive`
- `POST /v1/orgs/:orgId/connector-instances/:connectorInstanceId/disable`
- `POST /v1/orgs/:orgId/connector-instances/:connectorInstanceId/enable`
### Connector instance access endpoints
- `GET /v1/orgs/:orgId/connector-instances/:connectorInstanceId/access`
- `POST /v1/orgs/:orgId/connector-instances/:connectorInstanceId/access`
- `DELETE /v1/orgs/:orgId/connector-instances/:connectorInstanceId/access/:grantId`
## Connector targets
- `GET /v1/orgs/:orgId/connector-instances/:connectorInstanceId/targets`
- `POST /v1/orgs/:orgId/connector-instances/:connectorInstanceId/targets`
- `GET /v1/orgs/:orgId/connector-targets/:connectorTargetId`
- `PATCH /v1/orgs/:orgId/connector-targets/:connectorTargetId`
- `POST /v1/orgs/:orgId/connector-targets/:connectorTargetId/resync`
Example GitHub target body:
```json
{
"type": "github",
"remoteId": "org/repo",
"targetKind": "repository_branch",
"config": {
"repositoryId": 123456,
"repositoryFullName": "org/repo",
"branch": "main",
"ref": "refs/heads/main"
}
}
```
## Connector mappings
- `GET /v1/orgs/:orgId/connector-targets/:connectorTargetId/mappings`
- `POST /v1/orgs/:orgId/connector-targets/:connectorTargetId/mappings`
- `PATCH /v1/orgs/:orgId/connector-mappings/:connectorMappingId`
- `DELETE /v1/orgs/:orgId/connector-mappings/:connectorMappingId`
- `POST /v1/orgs/:orgId/connector-mappings/:connectorMappingId/preview`
Example mapping body:
```json
{
"mappingKind": "path",
"selector": "/sales/skills/**",
"objectType": "skill",
"pluginId": "plugin_123",
"autoAddToPlugin": true,
"config": {
"parserMode": "opencode"
}
}
```
## Connector sync events
- `GET /v1/orgs/:orgId/connector-sync-events`
- `GET /v1/orgs/:orgId/connector-sync-events/:connectorSyncEventId`
- `POST /v1/orgs/:orgId/connector-sync-events/:connectorSyncEventId/retry`
## GitHub-specific admin endpoints
- `POST /v1/orgs/:orgId/connectors/github/setup`
- `POST /v1/orgs/:orgId/connectors/github/accounts`
- `GET /v1/orgs/:orgId/connectors/github/accounts/:connectorAccountId/repositories`
- `POST /v1/orgs/:orgId/connectors/github/validate-target`
## Response conventions
List:
```json
{
"items": [],
"nextCursor": null
}
```
Detail:
```json
{
"item": {}
}
```
Mutation:
```json
{
"ok": true,
"item": {}
}
```
Async action:
```json
{
"ok": true,
"queued": true,
"job": {
"id": "job_123"
}
}
```

View File

@@ -0,0 +1,70 @@
# New Plugin Arch API
This document is now the API index for the new plugin architecture.
## API docs
- `prds/new-plugin-arch/admin-api.md`
- authenticated admin and direct-management APIs
- `prds/new-plugin-arch/delivery-api.md`
- future client delivery and install-state APIs
- `prds/new-plugin-arch/webhooks-api.md`
- public webhook ingress and async sync trigger shapes
- `prds/new-plugin-arch/GitHub-connector.md`
- GitHub-specific connector flow, permissions, webhook behavior, and reconciliation model
## Shared principles
- expose logical resources, not raw tables;
- current-state endpoints should return projected current rows, not version-history duplicates;
- version/history endpoints should be explicit;
- delivery APIs should stay distinct from admin mutation APIs;
- public webhooks should stay distinct from authenticated APIs.
## Suggested split of responsibility
### Admin API
Use for:
- config object CRUD
- version history access
- plugin management
- access grants
- connector setup and mapping management
- sync-event inspection and retries
### Delivery API
Use for:
- listing accessible plugins for clients/users
- manifest retrieval
- download/install payloads
- reporting and comparing installed state
### Webhooks API
Use for:
- public connector ingress
- signature verification
- normalized webhook envelope handling
- async sync enqueue contracts
## Current recommendations
- keep one shared `config-objects` admin surface and add type-specific convenience endpoints where UI needs them;
- keep current-state search/list endpoints separate from version-history endpoints;
- treat plugin access management as a first-class API surface;
- keep connector setup, target, mapping, and sync APIs explicit;
- keep public webhook ingress separate from authenticated APIs.
## Current gaps
Still to decide:
- exact auth model for end-user delivery endpoints;
- rolling-latest vs release-snapshot delivery semantics;
- which type-specific validate/preview endpoints are public vs internal-only;
- whether bulk mutation endpoints are needed for large plugin or mapping edits.

View File

@@ -0,0 +1,29 @@
# Config Types
These docs describe the unique data shape for each supported config type in the new plugin architecture.
They build on the shared model in `prds/new-plugin-arch/datastructure.md`:
- `config_object` = current searchable projection
- `config_object_version` = immutable history and latest-version source of truth
Related API design:
- `prds/new-plugin-arch/api.md`
Type-specific docs:
- `prds/new-plugin-arch/config-types/skills.md`
- `prds/new-plugin-arch/config-types/agents.md`
- `prds/new-plugin-arch/config-types/commands.md`
- `prds/new-plugin-arch/config-types/tools.md`
- `prds/new-plugin-arch/config-types/mcps.md`
Recommended pattern across all types:
- keep one shared `config_object` / `config_object_version` backbone;
- project current, queryable metadata onto `config_object`;
- preserve the raw source artifact on `config_object_version`;
- encrypt key payload/content columns at rest across all config types;
- keep friendly metadata like `title` and `description` plaintext when needed for dashboard display and search;
- add one type-specific current projection table per type when the shape is meaningfully different or needs fast filtering.

View File

@@ -0,0 +1,117 @@
# Agents Data Structure
## Source formats to support
Source: `https://opencode.ai/docs/agents/`
OpenCode agents can come from:
- markdown files in `.opencode/agents/`
- JSON config under `agent` in `opencode.json`
For markdown agents:
- filename becomes the agent name
- file body becomes the prompt
- frontmatter provides structured config
Recognized OpenCode agent fields from the config schema:
- `model`
- `variant`
- `temperature`
- `top_p`
- `prompt`
- `tools` deprecated
- `disable`
- `description`
- `mode` (`subagent`, `primary`, `all`)
- `hidden`
- `options`
- `color`
- `steps`
- `maxSteps` deprecated
- `permission`
Important schema behavior:
- unknown keys are allowed and moved into `options`
- legacy `tools` config is converted into permissions
- legacy `maxSteps` is normalized into `steps`
## Canonical storage recommendation
### Shared tables
- `config_object`
- `config_object_version`
### Current projection table
Add:
- `config_object_agent_current`
Suggested columns:
- `config_object_id`
- `agent_name`
- `description`
- `prompt_text`
- `mode`
- `hidden` nullable
- `disabled` nullable
- `model` nullable
- `variant` nullable
- `temperature` nullable
- `top_p` nullable
- `steps` nullable
- `color` nullable
- `permission_json` nullable
- `options_json` nullable
- `legacy_tools_json` nullable
- `source_format` (`markdown`, `json`, `connector`)
## Raw version storage
Each version should preserve:
- raw markdown or raw JSON source
- parsed frontmatter/config JSON
- raw prompt body if markdown-based
- normalized config JSON after alias/deprecation handling if we want auditability
## Search and title strategy
Friendly title strategy:
- use the agent name derived from filename or JSON key
Description strategy:
- use `description` as the primary searchable summary
- if needed, include prompt text in `search_text`, but do not surface it as the main title/description fields
Default current search should hit:
- `config_object.title`
- `config_object.description`
- optional `config_object_agent_current.mode`
- optional `config_object_agent_current.model`
## Why an agent-specific table helps
Agents have a configuration-heavy shape with many current-state filters that may matter in UI:
- mode
- model
- hidden/disabled state
- steps cap
- permission summary
Putting these on a type-specific projection table avoids repeated JSON scans.
## Table recommendation summary
- shared backbone: `config_object`, `config_object_version`
- type projection: `config_object_agent_current`

View File

@@ -0,0 +1,92 @@
# Commands Data Structure
## Source formats to support
Source: `https://opencode.ai/docs/commands/`
OpenCode commands can come from:
- markdown files in `.opencode/commands/`
- JSON config under `command` in `opencode.json`
For markdown commands:
- filename becomes the command name
- file body becomes the template
Recognized command fields from the config schema:
- `template` required
- `description` optional
- `agent` optional
- `model` optional
- `subtask` optional
Runtime template features are part of the content rather than top-level fields:
- `$ARGUMENTS`, `$1`, `$2`, etc.
- shell injection like ``!`command` ``
- file references like `@path/to/file`
## Canonical storage recommendation
### Shared tables
- `config_object`
- `config_object_version`
### Current projection table
Add:
- `config_object_command_current`
Suggested columns:
- `config_object_id`
- `command_name`
- `description` nullable
- `template_text`
- `default_agent` nullable
- `model` nullable
- `subtask` nullable
- `source_format` (`markdown`, `json`, `connector`)
- `uses_arguments` boolean
- `uses_shell_injection` boolean
- `uses_file_references` boolean
## Raw version storage
Each version should preserve:
- raw markdown or raw JSON source
- parsed command config JSON
- raw template text
## Search and title strategy
Friendly title strategy:
- use the command name derived from filename or JSON key
Description strategy:
- use explicit `description` when present
- if missing, optionally derive a short summary from the first sentence of `template_text` for dashboard display only
Search should hit current command rows, not version history.
## Why a command-specific table helps
Commands are structurally simple, but users will likely want to filter by:
- target agent
- whether the command runs as a subtask
- whether the template uses arguments or shell injection
These are current-state concerns, so a small projection table is enough.
## Table recommendation summary
- shared backbone: `config_object`, `config_object_version`
- type projection: `config_object_command_current`

View File

@@ -0,0 +1,142 @@
# MCPs Data Structure
## Source formats to support
Source docs:
- `https://opencode.ai/docs/mcp-servers/`
Actual allowed keys verified from the OpenCode SDK/config schema:
- local MCP
- `type: "local"`
- `command: string[]`
- `environment?: Record<string, string>`
- `enabled?: boolean`
- `timeout?: number`
- remote MCP
- `type: "remote"`
- `url: string`
- `enabled?: boolean`
- `headers?: Record<string, string>`
- `oauth?: McpOAuthConfig | false`
- `timeout?: number`
- OAuth config
- `clientId?: string`
- `clientSecret?: string`
- `scope?: string`
Important schema behavior:
- the OpenCode MCP config is strict and discriminated by `type`
- unlike agents, MCP objects do not accept arbitrary extra keys in the core schema
## Canonical storage recommendation
### Shared tables
- `config_object`
- `config_object_version`
### Current projection table
Add:
- `config_object_mcp_current`
Suggested columns:
- `config_object_id`
- `mcp_name`
- `connection_type` (`local`, `remote`)
- `enabled` nullable
- `timeout_ms` nullable
- `command_json` nullable
- `environment_json` nullable
- `url` nullable
- `headers_json` nullable
- `oauth_mode` nullable (`auto`, `configured`, `disabled`)
- `oauth_client_id` nullable
- `oauth_scope` nullable
- `has_oauth_client_secret` boolean nullable
### OpenWork-specific local install fields
For local MCPs, we likely want extra product fields that are not part of the upstream OpenCode schema but help with distribution and setup.
Suggested additional columns:
- `requires_install` boolean nullable
- `install_command_script` nullable
- `install_docs_link` nullable
- `install_notes` nullable
Optional future additions:
- `install_check_command` nullable
- `platforms_json` nullable
- `package_manager` nullable
## Raw version storage
Each version should preserve:
- raw JSON or markdown-derived config source
- normalized MCP config JSON
- parser version
## Secret-handling note
MCPs are the most likely type to contain secrets or sensitive values.
Recommendations:
- treat `headers` and `environment` as potentially secret-bearing;
- like all config-object key payload columns, MCP core config data should be encrypted at rest;
- avoid copying secret material into searchable top-level text fields;
- if we later support secure secret storage, keep secret references separate from public metadata.
This is especially important for:
- remote MCP headers
- local MCP environment values
- OAuth client secrets
## Search and title strategy
Friendly title strategy:
- use the MCP object name from the config key
Description strategy:
- MCP objects do not have a native `description` field in the upstream schema;
- for dashboard UX, we should probably allow a local friendly description on `config_object.description` even though it is not part of the OpenCode MCP payload.
This means MCPs are a strong example of:
- raw source-of-truth in version history
- plus locally-managed friendly metadata on the parent object
## Why an MCP-specific table helps
MCPs have discriminated connection shapes and operational concerns that we will want to filter by:
- local vs remote
- enabled/disabled
- install required
- OAuth mode
- timeout
These are current operational fields and should not require parsing historical versions for every UI query.
## Table recommendation summary
- shared backbone: `config_object`, `config_object_version`
- type projection: `config_object_mcp_current`
## Open questions specific to MCPs
- Should local install scripts be executable content we store directly, or pointers to bundled files/docs?
- Should secret-bearing headers and environment values be split into secure secret references from day one?
- Do we want friendly dashboard metadata for MCPs to be always locally editable even when the core MCP payload is connector-managed?

View File

@@ -0,0 +1,145 @@
# Skills Data Structure
## Source formats to support
We should support both OpenCode-native and Claude-compatible skill formats.
### OpenCode skill shape
Source: `https://opencode.ai/docs/skills/`
Expected file layout:
- directory name is the skill name
- entry file is always `SKILL.md`
Recognized frontmatter fields in OpenCode:
- `name` required
- `description` required
- `license` optional
- `compatibility` optional
- `metadata` optional string-to-string map
Important OpenCode constraints:
- `name` must match the containing directory name
- `name` is lowercase alphanumeric with single hyphen separators
- `description` is 1-1024 chars
### Claude-compatible skill shape
Sources:
- `https://code.claude.com/docs/en/skills`
- OpenCode also discovers `.claude/skills/*/SKILL.md`
Claude-compatible frontmatter can additionally include:
- `when_to_use`
- `argument-hint`
- `disable-model-invocation`
- `user-invocable`
- `allowed-tools`
- `model`
- `effort`
- `context`
- `agent`
- `hooks`
- `paths`
- `shell`
Claude also supports supporting files alongside `SKILL.md` inside the skill directory.
## Canonical storage recommendation
### Shared tables
Use the shared tables from `prds/new-plugin-arch/datastructure.md`:
- `config_object`
- `config_object_version`
### Current projection table
Add a skill-specific current projection table:
- `config_object_skill_current`
This lets us query current skill-specific metadata without hitting historical versions.
Suggested columns:
- `config_object_id`
- `dialect` (`opencode`, `claude`, `hybrid`)
- `skill_name`
- `description`
- `license` nullable
- `compatibility` nullable
- `metadata_json` nullable
- `when_to_use` nullable
- `argument_hint` nullable
- `disable_model_invocation` nullable
- `user_invocable` nullable
- `allowed_tools_json` nullable
- `model` nullable
- `effort` nullable
- `context_mode` nullable
- `subagent` nullable (from the Claude `agent` frontmatter field)
- `hooks_json` nullable
- `paths_json` nullable
- `shell` nullable
- `has_supporting_files` boolean
- `body_markdown` optional — a denormalized copy of the current body, kept only if we want fast preview
## Raw version storage
Each `config_object_version` for a skill should preserve:
- raw `SKILL.md` content
- parsed frontmatter JSON
- parsed body markdown
- parser dialect used
- extraction status / warnings if parsing was partial
If the source came from a connector-backed skill directory, keep path data so we can recreate:
- `SKILL.md` relative path
- skill directory path
- any supporting file paths via connector source records
## Search and title strategy
For skills, current search should hit:
- `config_object.title`
- `config_object.description`
- optionally `config_object.search_text`
- optionally `config_object_skill_current.when_to_use`
Friendly title strategy:
- prefer frontmatter `name`
- if missing in Claude-compatible input, fall back to directory name
Description strategy:
- prefer frontmatter `description`
- optionally append `when_to_use` into `search_text`, but do not necessarily surface it as the main dashboard description
## Why a skill-specific table helps
Skills have the richest frontmatter of any planned type.
A dedicated table helps with:
- filtering skills by invocation mode
- showing whether a skill is user-invocable vs model-invocable
- showing bound model / effort / subagent behavior
- preserving compatible subsets across OpenCode and Claude skill formats
## Table recommendation summary
- shared backbone: `config_object`, `config_object_version`
- type projection: `config_object_skill_current`
- connector path history: `connector_source_binding`, `connector_source_tombstone`

View File

@@ -0,0 +1,115 @@
# Tools Data Structure
## Scope
In this doc, `tool` means a custom OpenCode tool definition, not a built-in native tool like `read` or `bash`.
Source: `https://opencode.ai/docs/tools/` plus `https://opencode.ai/docs/custom-tools/`
## Source formats to support
Custom tools are code modules, usually TypeScript or JavaScript files, placed in:
- `.opencode/tools/`
- `~/.config/opencode/tools/`
Observed tool definition shape from the plugin helper:
- `description` required
- `args` required
- `execute(args, context)` required
The helper type is effectively:
```ts
tool({
description: string,
args: ZodRawShape,
execute(args, context): Promise<string>
})
```
Naming behavior:
- default export -> tool name comes from filename
- multiple named exports -> tool names become `<filename>_<exportname>`
## Canonical storage recommendation
### Shared tables
- `config_object`
- `config_object_version`
### Current projection table
Add:
- `config_object_tool_current`
Suggested columns:
- `config_object_id`
- `tool_name`
- `module_file_name`
- `module_relative_path` nullable
- `export_name` nullable
- `definition_style` (`default_export`, `named_export`)
- `description`
- `args_schema_json` nullable
- `args_schema_text` nullable
- `runtime_language` nullable
- `static_analysis_status`
- `is_multi_tool_module` boolean
## Raw version storage
Each version should preserve:
- raw module source code
- parsed metadata from static analysis if available
- extraction warnings if analysis is partial or failed
## Important design note
Tools are harder to normalize than skills, agents, or commands because the source of truth is executable code.
That means:
- raw code must remain authoritative for reconstruction;
- description and args metadata should be treated as extracted projections;
- extraction may need AST parsing or other static analysis;
- if static analysis fails, the object should still be storable, but may show degraded metadata in the UI.
## Search and title strategy
Friendly title strategy:
- use the final resolved tool name
Description strategy:
- use extracted `description`
- if extraction fails, fall back to file name and a generic label like `Custom tool definition`
## Why a tool-specific table helps
Tools are code-backed and may need UI around:
- resolved tool names
- export style
- argument schema preview
- static analysis health
These are not good candidates for repeated JSON scanning of version history.
## Table recommendation summary
- shared backbone: `config_object`, `config_object_version`
- type projection: `config_object_tool_current`
## Open questions specific to tools
- Do we require successful static analysis before a tool can be published?
- Do we support only JS/TS source at first, or arbitrary-language wrappers as first-class tool objects?
- Do we want to store a JSON Schema projection of args, or only a human-readable summary?

View File

@@ -0,0 +1,525 @@
# New Plugin Arch Data Structure
This document holds the proposed data model and schema direction for the new plugin architecture.
It is intentionally separate from `prds/new-plugin-arch/plan.md` so the plan can stay focused on product direction and architectural decisions while this file captures implementation-oriented structure.
Type-specific shape docs live in:
- `prds/new-plugin-arch/config-types/README.md`
API design lives in:
- `prds/new-plugin-arch/api.md`
RBAC design lives in:
- `prds/new-plugin-arch/rbac.md`
## Guiding rules
- config objects are first-class and versioned;
- plugins link to config object identities, never directly to object versions;
- plugin resolution always uses the latest active object version;
- latest version is derived from `config_object_version` ordering, not stored separately on `config_object`;
- key config payload/data columns should be encrypted at rest;
- friendly current metadata like `title` and `description` can remain plaintext for UI and search;
- connector provenance is stored explicitly;
- deletes are soft and path tombstones are preserved for connector-managed items;
- RBAC shape should stay consistent across config objects, plugins, and connectors.
## Core tables
### `config_object`
Stable identity row for one logical config object.
Suggested columns:
- `id`
- `organization_id`
- `object_type` (`skill`, `mcp`, `command`, `agent`, `tool`, `hook`, `context`, `custom`)
- `source_mode` (`cloud`, `import`, `connector`)
- `title`
- `description` nullable
- `search_text` nullable
- `slug` or stable org-local key if needed
- `current_file_name` nullable
- `current_file_extension` nullable
- `current_relative_path` nullable
- `status` (`active`, `inactive`, `deleted`, `archived`, `ingestion_error`)
- `created_by_org_membership_id`
- `connector_instance_id` nullable
- `created_at`
- `updated_at`
- `deleted_at` nullable
Notes:
- this is the row plugins reference;
- this is also the row current search and dashboard queries should hit;
- title/description on this row are the current projection derived from the latest version, not an independent historical source of truth;
- `title`, `description`, and `search_text` may remain plaintext because they are intended for dashboard rendering and search;
- `updated_at` is convenience metadata only and should not be treated as the source of truth for latest version resolution;
- connector-managed objects still use the same identity table.
### `config_object_version`
Immutable content/history row for each version of a config object.
Suggested columns:
- `id`
- `config_object_id`
- `normalized_payload_json`
- `raw_source_text` nullable
- `schema_version` or parser version nullable
- `created_via` (`cloud`, `import`, `connector`, `system`)
- `created_by_org_membership_id` nullable
- `connector_sync_event_id` nullable
- `source_revision_ref` nullable
- `is_deleted_version` boolean default false
- `created_at`
Notes:
- object-type-specific fields should generally live in payload JSON, not as many sparse columns on the shared table;
- version rows should not be the primary surface for current library search because that would create duplicate hits across historical versions of the same object;
- a deleted source file can create a terminal deleted version while leaving the identity row intact;
- `normalized_payload_json`, `raw_source_text`, and any equivalent key content columns for config objects should be encrypted at rest;
- `config_object_version` is the single source of truth for version history and latest-version lookup.
Current metadata projection rule:
- after creating a new latest version, parse whatever title/description/friendly metadata can be derived from that version and write the current projection onto the parent `config_object` row;
- current dashboard/search experiences should query `config_object`, not `config_object_version`.
Suggested index:
- (`config_object_id`, `created_at` DESC, `id` DESC)
Latest lookup rule:
- latest version for an object = newest row for that `config_object_id`, ordered by `created_at DESC, id DESC`.
- `created_at` should be database-generated so ordering stays authoritative.
Version number note:
- v1 does not require a separate version-number column on `config_object_version`;
- immutable ids plus `created_at` are enough for history and latest-version resolution;
- add a human-facing version number later only if product UX needs ordered revision labels.
Metadata extraction note:
- some config types derive title/description from file contents, such as skill frontmatter;
- other config types may derive friendly metadata from file name, path, or type-specific parsing rules;
- type-specific extraction rules should run when projecting the latest version onto `config_object`.
### `plugin`
Stable deliverable row.
Suggested columns:
- `id`
- `organization_id`
- `name`
- `description`
- `status`
- `created_by_org_membership_id`
- `created_at`
- `updated_at`
- `deleted_at` nullable
Notes:
- a plugin is the administrator-facing unit of delivery;
- a plugin contains config object identities, not pinned content versions;
- when resolving a plugin, the system selects the newest version row for each linked object.
### `plugin_config_object`
Membership join between plugins and config object identities.
Suggested columns:
- `id`
- `plugin_id`
- `config_object_id`
- `membership_source` (`manual`, `connector`, `api`, `system`)
- `connector_mapping_id` nullable
- `created_by_org_membership_id` nullable
- `created_at`
- `removed_at` nullable
Constraints:
- unique active membership on (`plugin_id`, `config_object_id`)
Notes:
- current implementation keeps one logical membership row per (`plugin_id`, `config_object_id`) and uses `removed_at` for soft removal/reactivation rather than append-only history rows;
- if an object later becomes deleted, the membership row can remain while delivery skips that object.
## Access and RBAC tables
We want the same RBAC model across config objects, plugins, and connectors.
There are two realistic schema options:
1. Separate access tables per resource type
- better foreign keys
- more repeated schema
2. One generic access table
- easier shared logic
- weaker relational guarantees
Current recommendation:
- start with separate tables that share the same shape.
### `plugin_access_grant`
Suggested columns:
- `id`
- `plugin_id`
- `org_membership_id` nullable
- `team_id` nullable
- `org_wide` boolean default false
- `role` or `permission_level`
- `created_by_org_membership_id`
- `created_at`
- `removed_at` nullable
### `config_object_access_grant`
Same shape as plugin access, but scoped to `config_object_id`.
### `connector_instance_access_grant`
Same shape as plugin access, but scoped to `connector_instance_id`.
RBAC note:
- plugin delivery may be implemented primarily by plugin access grants;
- if a team has access to a plugin, that is effectively the publish step.
- config objects and plugins should be private by default;
- sharing with the whole org should be represented as one org-wide grant, not per-user entries.
- use `org_wide = true` for v1.
- member and team sharing should continue to use normal explicit grant rows.
- current implementation also uses one logical grant row per target principal and reactivates it by clearing `removed_at`.
## Connector tables
### `connector_account`
Represents one authenticated or installed connector relationship.
Examples:
- one GitHub App installation
- one future API credential binding
Suggested columns:
- `id`
- `organization_id`
- `connector_type` (`github`, etc.)
- `remote_id`
- `external_account_ref`
- `display_name`
- `status`
- `created_by_org_membership_id`
- `created_at`
- `updated_at`
Notes:
- secrets should stay out of git-backed repo files and remain private;
- `id` is OpenWork's local primary key, while `remote_id` is the stable connector-side identifier we can use across different connector families;
- this row is the reusable "one-time setup" layer.
### `connector_instance`
Represents one configured use of a connector account.
Examples:
- a GitHub repo + branch configuration
- a future API collection endpoint mapping
Suggested columns:
- `id`
- `organization_id`
- `connector_account_id`
- `connector_type`
- `remote_id` nullable
- `name`
- `status`
- `instance_config_json`
- `last_synced_at` nullable
- `last_sync_status` nullable
- `last_sync_cursor` nullable
- `created_by_org_membership_id`
- `created_at`
- `updated_at`
Notes:
- one connector instance may feed multiple plugins;
- one plugin may include objects from multiple connector instances;
- one connector instance may ingest objects without direct plugin auto-membership;
- `remote_id` is optional here because some connector instances may not map cleanly to one remote object, while others will.
### `connector_target`
Represents the external source target inside an instance.
Examples:
- repo owner/name
- branch
- API endpoint family
- collection identifier
Suggested columns:
- `id`
- `connector_instance_id`
- `connector_type`
- `remote_id`
- `target_kind`
- `external_target_ref`
- `target_config_json`
- `created_at`
- `updated_at`
Notes:
- this table lets us support git and non-git connectors with one shared abstraction;
- `remote_id` should be the canonical external identifier for the target, such as `org/repo` for GitHub repo targets.
### `connector_mapping`
Maps part of a connector target into a config object type and optional plugin behavior.
Suggested columns:
- `id`
- `connector_instance_id`
- `connector_target_id`
- `connector_type`
- `remote_id` nullable
- `mapping_kind` (`path`, `api`, `custom`)
- `selector`
- `object_type`
- `plugin_id` nullable
- `auto_add_to_plugin` boolean
- `mapping_config_json`
- `created_at`
- `updated_at`
Examples:
- selector `/sales/skills/**` -> `skill` -> plugin A
- selector `/sales/commands/**` -> `command` -> plugin A
- selector `/finance/skills/**` -> `skill` -> plugin B
Notes:
- this is the row that captures the default parent-path -> plugin behavior;
- advanced/manual plugins can still include connector-managed objects outside this automatic mapping;
- `remote_id` can be used if a connector exposes mapping-level remote identifiers, but it is optional.
### `connector_sync_event`
Audit row for each webhook/poll/sync execution.
Suggested columns:
- `id`
- `connector_instance_id`
- `connector_target_id` nullable
- `connector_type`
- `remote_id` nullable
- `event_type`
- `external_event_ref` nullable
- `source_revision_ref` nullable
- `status`
- `summary_json`
- `started_at`
- `completed_at` nullable
Notes:
- useful for debugging, replay decisions, and ingestion history;
- for GitHub this should also capture delivery ids and head commit SHAs inside `summary_json` or promoted columns if we need faster filtering.
### `connector_source_binding`
Links a live config object identity to its external source location.
Suggested columns:
- `id`
- `config_object_id`
- `connector_instance_id`
- `connector_target_id`
- `connector_mapping_id`
- `connector_type`
- `remote_id` nullable
- `external_locator`
- `external_stable_ref` nullable
- `last_seen_source_revision_ref` nullable
- `status`
- `created_at`
- `updated_at`
- `deleted_at` nullable
Examples of `external_locator`:
- repo path
- API resource id
- remote document key
Notes:
- one live object should normally have one active source binding;
- this is how we know which external path/resource created the object;
- `remote_id` can hold a stable connector-native file/resource id when the remote system provides one.
### `connector_source_tombstone`
Preserves deleted source locations so we do not accidentally revive old identities.
Suggested columns:
- `id`
- `connector_instance_id`
- `connector_target_id`
- `connector_mapping_id`
- `connector_type`
- `remote_id` nullable
- `external_locator`
- `former_config_object_id`
- `deleted_in_sync_event_id`
- `deleted_source_revision_ref` nullable
- `created_at`
Notes:
- if the same path later reappears, ingestion creates a new config object identity;
- this table prevents accidental reconnect of a recreated file to an old object.
## Optional release/install tables
We have not finalized delivery yet, but these are likely candidates.
### `plugin_release`
Optional first-class release/snapshot row for a plugin.
Suggested columns:
- `id`
- `plugin_id`
- `release_kind` (`manual`, `system`, `access_change`, `sync_snapshot`)
- `created_by_org_membership_id` nullable
- `created_at`
- `notes` nullable
### `plugin_release_item`
Snapshot of the config object versions included at release time.
Suggested columns:
- `id`
- `plugin_release_id`
- `config_object_id`
- `config_object_version_id`
- `created_at`
Notes:
- even if runtime delivery is rolling latest, these tables can still be useful for audit, rollback, and support;
- if we decide releases are unnecessary, these tables can be deferred.
## Suggested write patterns
### Creating a new cloud/import object
1. insert `config_object`
2. insert first `config_object_version`
3. parse current metadata from that version and update `config_object.title`, `description`, `search_text`, and any current file metadata
4. optionally update `config_object.updated_at`
5. optionally insert `plugin_config_object`
### Connector sync updating an existing object
1. create `connector_sync_event`
2. locate active `connector_source_binding`
3. insert new `config_object_version`
4. parse current metadata from that version and update the parent `config_object` projection
5. optionally update `config_object.updated_at`
6. update `connector_source_binding.last_seen_source_revision_ref`
### Connector sync deleting an object
1. create deleted `config_object_version` or mark identity status as deleted
2. update `config_object.status` and clear or adjust current searchable projection as needed
3. close `connector_source_binding`
4. insert `connector_source_tombstone`
5. keep `plugin_config_object` history intact
## Latest-version strategy
To keep one source of truth and avoid out-of-date derived state, we should not store `latest_version_id` on `config_object` in v1.
Instead:
- treat `config_object_version` as the only source of truth for version ordering;
- determine latest by query using `created_at DESC, id DESC`;
- keep version rows immutable.
Example lookup pattern:
```sql
select v.*
from config_object co
join config_object_version v on v.config_object_id = co.id
where co.id = ?
order by v.created_at desc, v.id desc
limit 1;
```
Why this is the current recommendation:
- no duplicated latest-version pointer to drift out of sync;
- no revision counter race condition;
- simple write path;
- acceptable read cost given expected version counts and proper indexing;
- current library search can still stay fast because it queries `config_object`, not historical versions.
Future option:
- if reads later prove too expensive, we can add a derived latest pointer as an optimization, but not as the authoritative source of truth.
## Current schema recommendation
If we had to start implementation now, the minimum useful table set would be:
- `config_object`
- `config_object_version`
- `plugin`
- `plugin_config_object`
- `plugin_access_grant`
- `connector_account`
- `connector_instance`
- `connector_target`
- `connector_mapping`
- `connector_sync_event`
- `connector_source_binding`
- `connector_source_tombstone`

View File

@@ -0,0 +1,134 @@
# New Plugin Arch Delivery API
This document covers the future API surface for delivering plugins and config objects to end clients.
This area is still less defined than the admin API.
## Purpose
Delivery endpoints should answer:
- what plugins a user/team can access;
- what config objects are currently resolved inside a plugin;
- what content a client should install;
- what version/state a client currently has.
## Principles
- delivery should expose resolved current state, not raw history by default;
- access checks should happen at plugin/object delivery boundaries;
- delivery endpoints should be separate from admin mutation endpoints;
- clients should be able to fetch manifests before downloading content.
## Likely resources
- accessible plugins
- plugin manifests
- config-object download payloads
- client install state
- optional plugin release snapshots
## Candidate endpoints
### List accessible plugins for a client/user
- `GET /v1/orgs/:orgId/delivery/plugins`
Suggested query params:
- `teamId`
- `q`
- `limit`
- `cursor`
### Get one deliverable plugin
- `GET /v1/orgs/:orgId/delivery/plugins/:pluginId`
### Get plugin download manifest
- `GET /v1/orgs/:orgId/delivery/plugins/:pluginId/manifest`
Purpose:
- return the resolved current set of config objects and install metadata without forcing immediate download of all payloads.
Likely response shape:
```json
{
"plugin": {
"id": "plugin_123",
"name": "Sales plugin"
},
"items": [
{
"configObjectId": "cfg_123",
"type": "skill",
"title": "mcp-arch",
"versionId": "ver_123"
}
]
}
```
### Download one plugin bundle
- `GET /v1/orgs/:orgId/delivery/plugins/:pluginId/download`
Purpose:
- provide the resolved content package for a plugin.
Open question:
- whether this should return one bundle blob, a manifest plus signed URLs, or structured JSON with embedded encrypted payloads.
### Download one config object
- `GET /v1/orgs/:orgId/delivery/config-objects/:configObjectId/download`
Purpose:
- allow targeted install/update of a single object.
### Optional release-aware delivery
If plugin releases become first-class:
- `GET /v1/orgs/:orgId/delivery/plugins/:pluginId/releases/:releaseId/manifest`
- `GET /v1/orgs/:orgId/delivery/plugins/:pluginId/releases/:releaseId/download`
## Client state endpoints
These likely matter once client sync/install flows are defined.
### Report client install state
- `POST /v1/orgs/:orgId/delivery/clients/:clientId/state`
Purpose:
- let a client report what plugins/config objects/versions are installed.
### Get client install state
- `GET /v1/orgs/:orgId/delivery/clients/:clientId/state`
### Compute client updates
- `POST /v1/orgs/:orgId/delivery/clients/:clientId/check-updates`
Purpose:
- compare installed state with accessible current state and return required updates.
## Recommended next decisions
We still need to lock down:
- rolling latest vs release snapshot delivery;
- manifest shape per config type;
- auth model for client delivery;
- how encrypted payloads are transported to clients;
- how install conflicts and local overrides are represented.

View File

@@ -0,0 +1,63 @@
# Learnings
Read this file before starting any implementation step.
After completing a step, prepend any new learnings to the top of this file.
## What counts as a learning
- architecture constraints discovered in the current codebase
- route or schema patterns that should be reused
- persistence limitations or migration gotchas
- RBAC edge cases or clarified decisions
- webhook or raw-body handling pitfalls
- test harness quirks
- anything that would save the next agent time or prevent a bad implementation choice
## Prepend format
Use this shape for new entries:
```md
## YYYY-MM-DD Step N - Short title
- learning 1
- learning 2
- follow-up or caution
```
## Current entries
## 2026-04-17 Post-step cleanup - Type tightening and naming
- The route directory is now `ee/apps/den-api/src/routes/org/plugin-system/`; `plugin-arch` was only the planning nickname and was too confusing as a long-lived API module name.
- The plugin-system route wrapper can stay type-safe enough without `@ts-nocheck` by isolating Hono middleware registration behind a tiny `withPluginArchOrgContext()` helper and using explicit request-part adapters for `param`, `query`, and `json` reads.
- The Drizzle layer is happiest when plugin-system store/access helpers use concrete typed-id aliases (`ConfigObjectId`, `PluginId`, `ConnectorInstanceId`, etc.) plus discriminated resource-target unions; broad `string` or mixed-id unions quickly break `eq()` and `inArray()` inference.
- Connector GitHub App config should stay separate from normal GitHub OAuth login config, so den-api now reserves its own optional connector env namespace (`GITHUB_CONNECTOR_APP_*`) instead of reusing the existing auth credentials.
## 2026-04-17 Step 9 - Test harness and verification
- den-api package tests currently work best from `ee/apps/den-api/test/` rather than `ee/apps/den-api/src/`, because the package `tsconfig.json` compiles `src/**` during `pnpm --filter @openwork-ee/den-api build` and would otherwise drag Bun-only test imports into the production build.
- Bun is available in this workspace and is the easiest way to add focused TS tests for den-api without adding a new package runner; the current test slice uses `bun test ee/apps/den-api/test/...`.
- Webhook route tests can avoid database setup by targeting the early-exit paths: invalid signatures reject before JSON parsing/side effects, and valid signed payloads without an installation id return a clean ignored response before any connector lookup.
- Access-helper tests can import plugin-system modules safely if they seed the minimal env vars first, because `db.ts` pulls `env.ts` during module load even when the specific test only exercises pure helper functions.
## 2026-04-17 Step 5-8,10 - RBAC, routes, webhook ingress, and doc reconciliation
- The admin endpoint slice now lives in `ee/apps/den-api/src/routes/org/plugin-system/routes.ts`, with shared access checks in `ee/apps/den-api/src/routes/org/plugin-system/access.ts` and persistence/serialization helpers in `ee/apps/den-api/src/routes/org/plugin-system/store.ts`.
- `resolveOrganizationContextMiddleware` depends on validated `:orgId` params, so plugin-system routes must run `paramValidator(...)` before org-context resolution; the custom route helper was adjusted to inject auth/org middleware after per-route validators.
- The current endpoint layer has no separate org-capability table, so create/manage-account capabilities are implemented as org owner/admin checks only; resource-level edit/view behavior still uses direct, team, org-wide, and plugin-inherited access resolution.
- Config-object inherited access from plugins is view-only in the helper layer; edit/manage actions still require direct object grants or org-admin override, which keeps plugin delivery access from accidentally becoming object edit access.
- GitHub webhook ingress is registered at `ee/apps/den-api/src/routes/webhooks/github.ts`, verifies `X-Hub-Signature-256` against the raw body before JSON parsing, queues `push` sync events into `connector_sync_event`, and treats installation lifecycle updates as connector-account health changes rather than content sync jobs.
- Because dependencies are still missing in this worktree, verification remains limited to parse-oriented `tsc --noResolve` calls, `git diff --check`, and JSON validation; real den-api execution and automated route tests are still blocked until the workspace is installed.
## 2026-04-17 Step 4 - Den DB persistence backbone
- The persistence backbone now lives in `ee/packages/den-db/src/schema/sharables/plugin-arch.ts`; den-api can query these tables directly through the existing shared `@openwork-ee/den-db/schema` export instead of adding a new repository layer.
- Encrypted config-object payload storage is implemented as encrypted `text` columns, not native MySQL `json`, because the existing `encryptedColumn()` helper serializes ciphertext blobs; anything that needs indexed/searchable current metadata still has to stay projected onto plaintext columns on `config_object`.
- To keep MySQL uniqueness simple in v1, plugin memberships and access grants currently use one row per logical relationship plus `removed_at`, not append-only historical rows; re-activating a removed membership/grant should update the existing row instead of inserting a duplicate.
- The migration for this step was written manually as `ee/packages/den-db/drizzle/0010_plugin_arch.sql` and journaled in `drizzle/meta/_journal.json` because Drizzle generation is still blocked by missing local package dependencies in this worktree.
- `connector_source_binding` is unique on `config_object_id` only; deleted-path history is preserved in `connector_source_tombstone`, so recreated paths should mint a new object identity rather than trying to reactivate the old binding row.
## 2026-04-17 Step 1-3 - Placement, contracts, and shared schemas
- The org-scoped plugin-architecture admin APIs belong in `ee/apps/den-api/src/routes/org`, not `apps/server` or `apps/server-v2`: the existing authenticated `/v1/orgs/:orgId/...` surface, Better Auth session flow, org context middleware, and Hono OpenAPI route style already live there.
- The persistence home for plugin-architecture resources is `ee/packages/den-db/src/schema`; `apps/server-v2` is workspace-first SQLite state for local/runtime control and is the wrong place for durable org resources like config objects, plugins, connector accounts, and grants.
- den-api route validation is built around `hono-openapi` validators plus `requireUserMiddleware`, `resolveOrganizationContextMiddleware`, and `resolveMemberTeamsMiddleware`, so new route code should reuse those patterns instead of inventing a separate request parsing layer.
- den-api does not currently have a webhook helper or raw-body middleware; the GitHub ingress should read `c.req.raw` directly, verify `X-Hub-Signature-256` before JSON parsing, and only then normalize the payload.
- Adding the shared schemas early forced new TypeID families for config objects, versions, grants, plugins, and connector resources; later DB tables and route params should reuse those ids instead of falling back to plain strings.
- Local verification is currently dependency-blocked in this worktree: `pnpm --filter @openwork-ee/den-api build` and direct `tsc` both stop immediately because package-local dependencies like `tsup`, `zod`, and other workspace modules are not installed in this checkout.

View File

@@ -0,0 +1,400 @@
# New Plugin Arch
This document is a working architecture draft for the next-generation shared config system.
## Purpose
Replace the current skill hub model with a more general plugin system that can:
- manage many config object types, not just skills;
- ingest config objects from multiple sources, including cloud-native editing, imports, and connectors;
- package those objects into deliverables called plugins;
- apply one consistent RBAC model across objects, plugins, and connectors;
- preserve source-of-truth and version provenance, especially for connector-managed content.
## Core Concepts
### Config object
A config object is a single installable unit of product configuration.
Planned object types:
- Skill
- MCP
- Command
- Agent
- Hook
- Context
- Custom
Notes:
- Each type has its own shape and validation rules.
- Each type may install differently on the client.
- Each object needs a stable identity that survives edits and version changes.
### Tags (deferred)
We are removing config object groups from the v1 design.
Possible future direction:
- add a tag system later for organization and filtering.
### Plugin
A plugin is a deliverable made from:
- config objects.
Key idea:
- administrators assemble plugins as the unit that is delivered to users.
Plugins replace the current mental model of a hub.
## Source Model
Config objects may be created or updated from multiple source channels:
- Cloud: created or edited directly in OpenWork Cloud.
- Import: created from uploaded/imported material.
- Connector: created and updated from an external system.
### Source ownership rule
If an object is created by a connector, that connector remains the authoritative source of truth for its content.
Implications:
- cloud/import objects are first-party editable in OpenWork;
- connector-managed objects may still be editable in OpenWork by users with permission, but connector sync remains authoritative and may overwrite content on the next sync;
- OpenWork must keep enough source metadata to re-ingest, reconcile, audit, and display provenance.
## Connector Model
Connectors are reusable integrations that ingest config objects from external systems.
Examples:
- GitHub
- future git-based providers
- future API-based providers
- future non-file-based providers
### Connector layers
There appear to be at least two layers:
1. Connector type
- the implementation family, such as GitHub.
2. Connector configuration / instance
- a specific configured connection, such as one GitHub app installation plus repo/branch/mapping settings.
The current working direction is:
- one-time setup for a connector;
- per-instance configuration for a repo or external resource;
- that configured connector can then be tied to one or more plugins, or to config objects without an auto-managed plugin.
### GitHub example
For GitHub, the likely model is:
- a GitHub App is installed on a repo or org;
- OpenWork stores the repo binding;
- the user configures the branch to watch;
- the user configures path mappings from source paths to config object types;
- webhook events trigger ingestion and reconciliation.
Examples of mappings:
- `/bundles/skills/**` -> `skill`
- `/bundles/agents/**` -> `agent`
### Connector mapping responsibilities
A configured connector likely needs:
- source kind, such as `github`;
- source-specific config, such as branch;
- path or API mappings;
- plugin binding information;
- ingestion mode and parser rules;
- webhook or polling state;
- last successful sync state.
### Connector-to-plugin behavior
Current direction:
- one connector instance may feed multiple plugins;
- one plugin may include objects from multiple connector instances;
- one connector instance may also ingest objects without directly feeding a plugin;
- plugin membership may be manually edited even when some members originated from connectors.
Default UX direction:
- setup should offer one plugin per parent path;
- if `/sales/skills` maps to plugin A, then sibling mapped paths under that same parent should default to the same plugin rather than splitting automatically;
- a different parent path such as `/finance/skills` may map to a different plugin;
- advanced/manual plugin composition may still exist outside the default connector setup flow.
### File lifecycle behavior
For file-backed connectors:
- new file -> create new config object and add it to the bound plugin automatically;
- changed file -> create a new version or revision of the existing object;
- deleted file -> mark the current object as deleted on our side, but do not hard-delete it;
- recreated file at the same path -> create a new object identity rather than reviving the deleted one.
Important nuance:
- once a file-backed object is deleted, we should preserve historical path linkage, but we should not continue updating that deleted object if a file later reappears.
## Proposed Domain Model
### Stable identity layer
We likely need a stable identity table for each logical config object, separate from versioned content rows.
Why:
- plugins should point at stable identities, not at a mutable single content row;
- source ownership and provenance belong to the logical object;
- installation tracking likely belongs to the logical object plus a chosen version;
- object history becomes easier to preserve.
Minimum identity-layer responsibilities:
- stable id;
- org id;
- object type;
- source mode (`cloud`, `import`, `connector`);
- current title / friendly name;
- current description;
- connector instance reference if applicable;
- current file metadata where relevant;
- created by / created at;
- last updated at;
- lifecycle status.
### Version layer
Each edit or connector sync likely creates a new object version row.
Version rows would likely hold:
- version id;
- parent object id;
- parsed metadata for indexing;
- raw source payload or normalized payload;
- source revision reference such as commit SHA, webhook delivery id, or external version token;
- created at;
- created by or created via;
- deletion marker if the version represents removal.
Current open decision:
- whether we ever need human-facing revision numbers beyond immutable version ids plus timestamps.
Current leaning:
- we do want durable history and a clear latest version model;
- for v1, newest-created version is an acceptable latest-version rule if the database owns `created_at` and we use `id` as a tie-breaker;
- external source references like commit SHA should still be stored separately;
- implementation should prefer one source of truth over duplicated latest-version state;
- searchable current metadata should live on the parent object so dashboard queries do not scan historical versions and return duplicates.
### Plugin layer
Plugins likely need:
- stable id;
- org id;
- metadata;
- lifecycle status;
- membership rows pointing to config object identities;
- optional connector bindings if the plugin is connector-managed.
Important:
- plugin membership should preserve historical links even if an included object later becomes deleted or inactive;
- plugins reference config object identities, not pinned versions;
- plugin delivery resolves the latest version of each linked object;
- delivery logic can decide whether deleted items are omitted from downloads.
## RBAC Direction
RBAC should be consistent across:
- config objects;
- plugins;
- connectors.
We will likely need separate permission families for:
- creating config objects manually;
- editing cloud/import-managed objects;
- creating plugins;
- attaching objects to plugins;
- creating connector definitions;
- configuring connector instances;
- binding connector instances to plugins;
- approving connector ingestion or sync behavior;
- managing delivery visibility to users.
Open question:
- plugin "publish/release" may not be a separate workflow; in practice, delivery may just mean changing access permissions, such as granting a team access to a plugin.
## Provenance Requirements
We need strong provenance for connector-managed content.
At minimum, OpenWork should know:
- how the object was created;
- which connector instance created it;
- the external source address;
- the mapping that classified it;
- the external revision that produced the current local version;
- whether the object is active, deleted, stale, or out of sync.
For GitHub-like sources, we likely also need:
- app installation or account binding;
- repo owner/name;
- branch;
- path;
- commit SHA for each ingested version;
- webhook delivery or event metadata for debugging.
## Lifecycle States
We likely need soft lifecycle states instead of hard deletes.
Candidate statuses:
- active
- inactive
- deleted
- archived
- ingestion_error
Open question:
- whether `deleted` should mean source removed, while `archived` means intentionally retired by an admin.
## Compatibility With Current System
Today:
- skills are the only first-class sharable object in Den;
- hubs are team/member-access-controlled groupings of skills;
- the app downloads and installs individual skills, not durable plugin bundles.
Future:
- skills become one config object type among many;
- hubs disappear from the product model;
- plugins become the administrator-authored deliverable;
- connectors can automatically populate plugins;
- delivery/install rules move up from individual skills to plugin-aware distribution.
## Data Structure
The implementation-oriented schema and data-model details now live in:
- `prds/new-plugin-arch/datastructure.md`
- `prds/new-plugin-arch/rbac.md`
Those documents currently capture:
- the proposed `config_object` and `config_object_version` split;
- plugin membership tables;
- RBAC table direction;
- connector/account/instance/mapping/source-binding tables;
- latest-version lookup strategy;
- current metadata projection rules for dashboard search;
- optional release/install tables.
Key current data-model decisions:
- `config_object_version` is the single source of truth for version history and latest-version resolution;
- `config_object` is the current searchable projection for dashboard and library queries;
- latest version is resolved by `created_at DESC, id DESC`, not a stored latest pointer;
- v1 does not need a version-number column;
- connector source tombstones preserve deleted path history so recreated files get new identities.
## Immediate Open Questions
### Identity and typing
- Is `custom` just a catch-all typed blob, or does it need subtyping?
- Can one config object belong to many plugins? Current assumption: yes.
### Versioning
- v1 does not need explicit version numbers; immutable version ids plus `created_at` are enough.
- Do we need explicit human-facing revision numbers later for UX, debugging, or APIs?
- Plugins point to object identities and always resolve latest versions.
- Do we want plugin releases as a first-class concept separate from object versions?
### Connector ownership
- Connector-managed objects should still allow local edits for authorized users, knowing connector sync may overwrite content later.
- Membership, tags, and RBAC remain locally managed.
- Do we need conflict indicators when local edits diverge from the connector source before the next overwrite?
### Deletion and recreation
- When a connector file is deleted, should the object immediately disappear from active delivery, or remain selectable with warnings?
- If a file reappears at the same path, it should create a new object identity regardless of content.
- We likely need tombstones for deleted external paths so we do not accidentally revive prior identities.
### RBAC
- Who can create cloud-native config objects?
- Who can configure connector types vs connector instances?
- Who can bind connector instances to plugins?
- Who can override connector-derived membership or metadata?
### Default connector UX vs advanced composition
- How rigid should the default parent-path -> plugin mapping be?
- Should manual plugins be allowed to include connector-managed objects from any path even if the default setup flow keeps path families together?
### Ingestion engine
- Do we normalize all source payloads into one common schema plus type-specific payload blocks?
- How strict is validation on ingestion: reject bad files, partially ingest, or mark them errored?
- How do we surface per-file ingestion failures to admins?
### Delivery model
- Are plugins downloaded as full bundles, as selected object manifests, or as live subscriptions?
- How does install/update behavior differ by object type?
- How do clients know which version of each object or plugin they have?
## Current Recommendations
- Introduce a stable identity table plus immutable version rows for config objects.
- Treat `config_object_version` as the only source of truth for latest-version lookup in v1.
- Do not add a version-number column in v1 unless product requirements emerge for human-facing revision labels.
- Treat plugins as first-class entities with membership tables.
- Keep source ownership explicit on every config object identity.
- Model connectors as reusable integration definitions plus configured instances.
- Store connector provenance richly enough to debug and reconcile webhook-driven ingestion.
- Use soft deletion and preserve historical membership rather than hard-deleting rows.
## Next Discussion
Next we should define the delivery model:
- how plugins are published to users;
- whether delivery is pinned or rolling;
- how clients install/update each object type;
- how to represent client state and rollout state.

View File

@@ -0,0 +1,488 @@
# New Plugin Arch RBAC
This document captures the first-pass RBAC design for the new plugin architecture.
It is intentionally written as a discussion doc, not a final policy spec.
## Goal
We want one consistent RBAC model across:
- config objects
- plugins
- connector instances
And we also need org-level permissions for who can:
- create and edit those resources
- configure connector ingestion
- manage delivery/access
- view and debug sync state
## Main design principle
There are really two different permission layers:
1. org capability permissions
- what a member is allowed to do in the org in general
2. resource access permissions
- what a member/team can do with a specific config object, plugin, or connector instance
That split is important.
Example:
- a user may have org permission to create plugins;
- but they may still not have access to edit a specific existing plugin.
Locked decision:
- org admins should have implicit full access to every resource in the org.
## Resources that need RBAC
### Org-scoped capability layer
These are not tied to one individual row.
Examples:
- can create config objects
- can create plugins
- can create connector accounts
- can create connector instances
- can manage org-wide access policies
- can view connector sync diagnostics
### Resource-scoped access layer
These apply to individual resources.
Resources:
- one `config_object`
- one `plugin`
- one `connector_instance`
Current recommendation:
- do not make `connector_account` a day-one first-class resource with separate fine-grained RBAC unless we need it;
- use org-level capability permissions for connector account setup, and resource-level permissions for connector instances.
## Actors
The natural principals appear to be:
- org member
- team
Current recommendation:
- grants should be assignable to either a member or a team;
- resources should also be shareable with the org as a whole without creating per-user grants;
- effective access is the union of direct grants plus team grants;
- ownership/creator metadata should not automatically become the only authorization model.
Locked decision:
- config objects and plugins should be private by default.
- sharing should happen through explicit RBAC grants only.
## What we likely need to protect
### Config objects
Actions:
- view metadata
- view current content
- view history
- create new version
- edit current metadata
- archive/delete/restore
- manage object access
- attach/detach from plugins
### Plugins
Actions:
- view metadata
- view resolved members
- edit metadata
- add/remove config objects
- manage plugin access
- view delivery preview/resolved manifest
- create release snapshot if releases exist
### Connector instances
Actions:
- view connector config
- edit connector config
- edit targets and mappings
- manually trigger sync
- view sync events/logs
- manage connector access
- disable/archive connector
## Recommended permission model shape
### Resource roles
For v1, role-based resource access is probably better than dozens of tiny per-action grants.
Recommended shared roles:
- `viewer`
- `editor`
- `manager`
Suggested semantics:
#### `viewer`
- can view resource metadata and current state
- can view resolved contents where applicable
- cannot mutate
#### `editor`
- can modify the resource's working content/config
- cannot manage sharing/access unless explicitly promoted
#### `manager`
- can edit the resource
- can change access grants
- can archive/restore
- can perform high-impact control actions
Current recommendation:
- start with shared roles across resource types, then add per-resource-type nuances in enforcement logic if needed.
## Org capability permissions
These likely belong on org membership roles or org-level grants.
Candidate capabilities:
- `config_object.create`
- `plugin.create`
- `connector_account.create`
- `connector_instance.create`
- `connector_sync.view_all`
- `connector_sync.retry`
- `rbac.manage_org`
Current implementation note:
- until separate org-capability persistence exists, the endpoint layer maps these capabilities to org owner/admin membership only.
Why this matters:
- resource grants alone do not answer "who is allowed to create a new thing?"
- we need an org capability gate before resource-level RBAC even applies.
## Resource access tables
This doc aligns with the table direction in `prds/new-plugin-arch/datastructure.md`.
Current recommendation:
- separate access tables with the same shape:
- `config_object_access_grant`
- `plugin_access_grant`
- `connector_instance_access_grant`
Suggested shared columns:
- `id`
- resource id
- `org_membership_id` nullable
- `team_id` nullable
- `org_wide` boolean default false
- `role`
- `created_by_org_membership_id`
- `created_at`
- `removed_at` nullable
Additional guardrails:
- require exactly one grant target:
- `org_membership_id`
- `team_id`
- or `org_wide = true`
- use soft revocation via `removed_at`
- compute effective role from the strongest active grant
Recommended interpretation:
- `org_wide = true` means "shared with the org";
- this creates org-wide visibility/access without creating rows for every member or team;
- config objects and plugins remain private until such a grant is added;
- member/team sharing still uses normal explicit grant rows.
Locked decision:
- use `org_wide = true` for v1.
## Recommended authorization rules by resource
### Config objects
#### View
Needs one of:
- direct object grant
- team object grant
- object has an active org-wide grant
- access to a plugin that currently includes the object
- org admin implicit access
#### Edit content / create version
Needs one of:
- `editor` or `manager` on the object
#### Manage object access
Needs:
- `manager` on the object
#### Attach object to plugin
Likely needs both:
- edit rights on the object
- edit rights on the target plugin
Note: this conflicts with the cross-resource mutation section later in this document, which currently recommends requiring edit rights on the target plugin only.
This is one of the first policy questions we should lock down.
### Plugins
#### View
Needs one of:
- direct plugin grant
- team plugin grant
- plugin has an active org-wide grant
- org admin implicit access
#### Edit plugin metadata / membership
Needs:
- `editor` or `manager` on the plugin
#### Manage plugin access
Needs:
- `manager` on the plugin
### Connector instances
#### View connector setup
Needs:
- direct or team connector-instance grant
- or org admin implicit access
#### Edit mappings / targets / config
Needs:
- `editor` or `manager` on the connector instance
#### Trigger sync / retry sync
Likely needs:
- `editor` or `manager` on the connector instance
- and maybe an org capability for retrying failed syncs if we want tighter control
#### Manage connector access
Needs:
- `manager` on the connector instance
## Delivery and access
Current direction from the other docs:
- plugin delivery is mostly controlled through plugin access grants.
That means:
- if team B has access to plugin A, that is effectively the publish step;
- the delivery system should resolve access from plugin grants, not from low-level config-object grants alone.
Current recommendation:
- plugin delivery should primarily check plugin access;
- config-object access should govern direct admin/editing access, not plugin delivery;
- a user should have access to a config object if any of the following are true:
- they are directly granted access to the object
- they are on a team granted access to the object
- the object has an org-wide grant
- the object is included in a plugin they can access
- they are an org admin
This keeps the mental model simpler:
- plugins are the deliverable
- plugin access determines who gets the deliverable
## Connector-managed objects and RBAC
We already decided:
- connector-managed objects can still be edited locally;
- connector sync remains authoritative and may overwrite those edits later.
RBAC implication:
- connector origin should not automatically make an object read-only from an authorization perspective;
- authorization should still be based on the object's grants and org capabilities.
Locked decision:
- when a connector auto-creates an object, the creator should be the user who configured the connector behavior that caused the creation;
- after creation, the object follows normal permissions like any other object.
But we may want UX warnings when:
- a user has permission to edit;
- but their change is likely to be overwritten by the connector.
## Cross-resource mutation questions
These are the trickiest RBAC cases.
### Add object to plugin
Question:
- should a user need edit permission on both the object and the plugin?
Current recommendation:
- no.
Reason:
- plugin composition is controlled by the plugin;
- users only need edit rights on the plugin to add or remove objects from it.
### Bind connector mapping to plugin
Question:
- should a user need edit permission on both the connector instance and the plugin?
Current recommendation:
- yes.
### Auto-created objects from a connector mapping
Question:
- when a connector mapping auto-creates new objects and auto-adds them to a plugin, under whose permission does that action occur?
Current recommendation:
- treat it as an automated action attributed to the human creator of the connector/mapping configuration that caused it.
This should be auditable as:
- `created_via = connector`
- `created_by_org_membership_id = <connector or mapping creator>`
## Inheritance and default access
This is still a major open area.
Questions:
- when a new config object is manually created, who gets initial access?
- when a new plugin is created, who gets initial access?
- should creator always get `manager`?
Current recommendation:
- creator gets initial `manager` grant;
- connector-created objects also default to the creator of the relevant connector/mapping action;
- org owners/admins have implicit override access across all resources;
- teams should only gain access through explicit grants, not automatic inheritance.
Locked decision:
- config objects and plugins are private by default;
- users can share with individual members, teams, or the org as a whole;
- whole-org sharing should not create per-user or per-team rows.
## Suggested v1 defaults
If we need a practical starting point now:
- org owner/admin
- implicit full access to all plugin-arch resources in the org
- creator of a resource
- explicit or implicit `manager` on that resource
- connector-created resource
- creator is the user whose authorized connector/mapping configuration caused creation
- team/member grants
- explicit only
- org-wide share
- explicit only, via one grant that applies to the org as a whole
- delivery
- controlled by plugin access grants
- object/plugin/connector roles
- `viewer`, `editor`, `manager`
## API implications
The API surface in `prds/new-plugin-arch/admin-api.md` should assume:
- object access endpoints manage `config_object_access_grant`
- plugin access endpoints manage `plugin_access_grant`
- connector instance access endpoints manage `connector_instance_access_grant`
The API should also distinguish between:
- `403 forbidden because you lack org capability`
- `403 forbidden because you lack resource access`
That distinction will help a lot with admin UX.
## Locked decisions so far
1. Org admins have implicit full access to every resource in the org.
2. `viewer` / `editor` / `manager` are enough for v1.
3. Adding an object to a plugin only requires edit rights on the plugin.
4. Connector-created objects should attribute creation to the relevant connector/mapping creator, then follow normal object permissions.
5. Plugin delivery checks plugin access, not per-item access.
6. A user can access a config object if they are directly granted, team-granted, org-wide granted, or it is included in a plugin they can access (org admins additionally have implicit access per decision 1).
7. Default grants for connector auto-created objects should go to the creator.
8. Config objects and plugins are private by default.
9. Sharing with the whole org should be represented as one org-wide grant, not per-user entries.
10. Member and team sharing should continue to use explicit grant rows.
## Discussion questions
These are the main questions still worth answering next.
1. Should binding a connector mapping to a plugin require edit rights on both the connector instance and the plugin?
2. Should connector-instance managers automatically receive grants on connector-created objects, or only the original creator plus normal explicit grants?
3. Should plugin managers be able to include any visible object in a plugin, or only objects they can directly edit/view?
4. Should there be extra restrictions on who can manage access for encrypted/high-sensitivity object types like MCPs?
---
# New Plugin Arch Webhooks API
This document covers public webhook ingress and internal async sync trigger shapes.
Normal authenticated admin APIs are documented in `prds/new-plugin-arch/admin-api.md`.
## Principles
- public webhooks are separate from authenticated admin APIs;
- signature verification happens before parsing or processing;
- ingress should be fast and queue-driven;
- connector-specific reconciliation happens asynchronously.
## Public webhook endpoints
### GitHub webhook ingress
- `POST /api/webhooks/connectors/github`
Purpose:
- receive GitHub App webhook deliveries for connector sync and connector lifecycle events.
Expected request inputs:
- raw request body
- `X-Hub-Signature-256`
- `X-GitHub-Event`
- `X-GitHub-Delivery`
Behavior:
1. verify signature against raw body
2. reject invalid/missing signatures with `401`
3. normalize the webhook envelope
4. dispatch by event type
5. enqueue sync jobs for relevant branch updates
6. return quickly
Recommended responses:
- `401 Unauthorized` for invalid signature
- `202 Accepted` for relevant accepted events
- `200 OK` for valid but ignored events
## Internal webhook dispatch
Recommended normalized envelope:
```ts
type GithubWebhookEnvelope = {
deliveryId: string
event: string
installationId?: number
repositoryId?: number
repositoryFullName?: string
ref?: string
headSha?: string
payload: unknown
}
```
Recommended internal handlers:
- `githubWebhookIngress()`
- `handleGithubPushEvent()`
- `handleGithubInstallationEvent()`
- `handleGithubInstallationRepositoriesEvent()`
- `handleGithubRepositoryEvent()` (optional; can be added later)
## Signature verification shape
Requirements:
- use the GitHub App webhook secret;
- compute HMAC SHA-256 from the raw body;
- compare using constant-time comparison;
- do not parse JSON before verification;
- do not process if signature fails.
Practical rule:
- no signature match, no webhook processing.
Example pseudocode:
```ts
async function githubWebhookIngress(req: Request) {
const rawBody = await req.text()
const signature = req.headers.get("x-hub-signature-256")
if (!signature) return new Response("missing signature", { status: 401 })
const expected = signGithubBody(rawBody, env.GITHUB_CONNECTOR_APP_WEBHOOK_SECRET)
if (!timingSafeEqual(signature, expected)) {
return new Response("invalid signature", { status: 401 })
}
const event = req.headers.get("x-github-event")
const deliveryId = req.headers.get("x-github-delivery")
const payload = JSON.parse(rawBody)
return dispatchGithubWebhook({ event, deliveryId, payload })
}
```
## Internal sync enqueue contract
For relevant push events on tracked branches, enqueue an async sync job.
Recommended payload shape:
```ts
type GithubConnectorSyncJob = {
connectorType: "github"
connectorInstanceId: string
connectorTargetId: string
connectorSyncEventId: string
deliveryId: string
installationId: number
repositoryId: number
repositoryFullName: string
ref: string
headSha: string
}
```
Recommendations:
- dedupe on `connectorTargetId + headSha`
- preserve `deliveryId` for observability
- allow internal retries without requiring webhook redelivery
- installation lifecycle events may update connector-account health/state without enqueuing a content reconciliation job
## Worker result contract
The worker should update `connector_sync_event` with:
- final status (`completed`, `failed`, `partial`)
- target id
- head SHA
- created / updated / deleted / skipped counts
- per-file failures if any
- completed timestamp
## Related docs
- `prds/new-plugin-arch/GitHub-connector.md`
- `prds/new-plugin-arch/admin-api.md`