diff --git a/.claude/settings.local.json b/.claude/settings.local.json index 0b7ab72937..66e07e303a 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -14,7 +14,17 @@ "Bash(mv:*)", "Bash(ls:*)", "mcp__ide__getDiagnostics", - "Bash(pnpm --filter \"*connect*\" test connect-status-writer.service.spec)" + "Bash(pnpm --filter \"*connect*\" test connect-status-writer.service.spec)", + "Bash(pnpm add:*)", + "Bash(npx tsc:*)", + "Bash(pnpm list:*)", + "Bash(rm:*)", + "Bash(pnpm --filter ./api test)", + "Bash(pnpm i:*)", + "Bash(pnpm:*)", + "Bash(corepack prepare:*)", + "Bash(nvm:*)", + "Bash(git config:*)" ] }, "enableAllProjectMcpServers": false diff --git a/api/.env.development b/api/.env.development index 949bdc97ea..4c83c7ed89 100644 --- a/api/.env.development +++ b/api/.env.development @@ -15,6 +15,7 @@ PATHS_ACTIVATION_BASE=./dev/activation PATHS_PASSWD=./dev/passwd PATHS_RCLONE_SOCKET=./dev/rclone-socket PATHS_LOG_BASE=./dev/log # Where we store logs +PATHS_BACKUP_JOBS=./dev/api/backup ENVIRONMENT="development" NODE_ENV="development" PORT="3001" @@ -26,4 +27,4 @@ BYPASS_PERMISSION_CHECKS=false BYPASS_CORS_CHECKS=true CHOKIDAR_USEPOLLING=true LOG_TRANSPORT=console -LOG_LEVEL=trace +LOG_LEVEL=debug # Change to trace for extremely noisy logging diff --git a/api/.eslintrc.ts b/api/.eslintrc.ts index 6f224624d1..2809907765 100644 --- a/api/.eslintrc.ts +++ b/api/.eslintrc.ts @@ -53,5 +53,5 @@ export default tseslint.config(eslint.configs.recommended, ...tseslint.configs.r 'eol-last': ['error', 'always'], }, - ignores: ['src/graphql/generated/client/**/*'], + ignores: ['src/graphql/generated/client/**/*', 'scripts/**/*'], }); diff --git a/api/dev/api/backup/backup-jobs.json b/api/dev/api/backup/backup-jobs.json new file mode 100644 index 0000000000..e19ecac7d8 --- /dev/null +++ b/api/dev/api/backup/backup-jobs.json @@ -0,0 +1,25 @@ +[ + { + "id": "a68667b6-f4ef-4c47-aec3-d9886be78487", + "name": "Test", + "sourceType": "RAW", + "destinationType": "RCLONE", + "schedule": "0 2 * * *", + "enabled": true, + "sourceConfig": { + "label": "Raw file backup", + "sourcePath": "/Users/elibosley/Desktop", + "excludePatterns": [], + "includePatterns": [] + }, + "destinationConfig": { + "type": "RCLONE", + "remoteName": "google_drives", + "destinationPath": "desktop" + }, + "createdAt": "2025-05-27T15:02:31.655Z", + "updatedAt": "2025-05-27T15:11:40.547Z", + "lastRunAt": "2025-05-27T15:07:37.139Z", + "lastRunStatus": "Failed: RClone group backup-job_1748358397105_sbo5j322k failed or timed out." 
+ } +] \ No newline at end of file diff --git a/api/dev/configs/api.json b/api/dev/configs/api.json index 6664e3ecbf..a0c013c398 100644 --- a/api/dev/configs/api.json +++ b/api/dev/configs/api.json @@ -1,10 +1,12 @@ { - "version": "4.8.0", + "version": "4.9.5", "extraOrigins": [ "https://google.com", "https://test.com" ], "sandbox": true, "ssoSubIds": [], - "plugins": ["unraid-api-plugin-connect"] + "plugins": [ + "unraid-api-plugin-connect" + ] } \ No newline at end of file diff --git a/api/dev/states/myservers.cfg b/api/dev/states/myservers.cfg index c8a8701d15..5324fcaa8a 100644 --- a/api/dev/states/myservers.cfg +++ b/api/dev/states/myservers.cfg @@ -1,5 +1,5 @@ [api] -version="4.4.1" +version="4.8.0" extraOrigins="https://google.com,https://test.com" [local] sandbox="yes" diff --git a/api/generated-schema.graphql b/api/generated-schema.graphql index 56dda378a5..ae0fc13ca1 100644 --- a/api/generated-schema.graphql +++ b/api/generated-schema.graphql @@ -598,6 +598,7 @@ enum Resource { ACTIVATION_CODE API_KEY ARRAY + BACKUP CLOUD CONFIG CONNECT @@ -653,6 +654,63 @@ type ApiKeyWithSecret implements Node { key: String! } +type JobStatus implements Node { + id: PrefixedID! + + """External job ID from the job execution system""" + externalJobId: String! + name: String! + status: BackupJobStatus! + + """Progress percentage (0-100)""" + progress: Int! + message: String + error: String + startTime: DateTime! + endTime: DateTime + + """Bytes transferred""" + bytesTransferred: Int + + """Total bytes to transfer""" + totalBytes: Int + + """Transfer speed in bytes per second""" + speed: Int + + """Elapsed time in seconds""" + elapsedTime: Int + + """Estimated time to completion in seconds""" + eta: Int + + """Human-readable bytes transferred""" + formattedBytesTransferred: String + + """Human-readable transfer speed""" + formattedSpeed: String + + """Human-readable elapsed time""" + formattedElapsedTime: String + + """Human-readable ETA""" + formattedEta: String +} + +"""Status of a backup job""" +enum BackupJobStatus { + QUEUED + RUNNING + COMPLETED + FAILED + CANCELLED +} + +""" +A date-time string at UTC, such as 2019-12-03T09:54:33Z, compliant with the date-time format. +""" +scalar DateTime + type RCloneDrive { """Provider name""" name: String! @@ -693,6 +751,98 @@ type RCloneRemote { config: JSON! 
} +type RCloneJobStats { + """Bytes transferred""" + bytes: Float + + """Transfer speed in bytes/sec""" + speed: Float + + """Estimated time to completion in seconds""" + eta: Float + + """Elapsed time in seconds""" + elapsedTime: Float + + """Progress percentage (0-100)""" + percentage: Float + + """Number of checks completed""" + checks: Float + + """Number of deletes completed""" + deletes: Float + + """Number of errors encountered""" + errors: Float + + """Whether a fatal error occurred""" + fatalError: Boolean + + """Last error message""" + lastError: String + + """Number of renames completed""" + renames: Float + + """Whether there is a retry error""" + retryError: Boolean + + """Number of server-side copies""" + serverSideCopies: Float + + """Bytes in server-side copies""" + serverSideCopyBytes: Float + + """Number of server-side moves""" + serverSideMoves: Float + + """Bytes in server-side moves""" + serverSideMoveBytes: Float + + """Total bytes to transfer""" + totalBytes: Float + + """Total checks to perform""" + totalChecks: Float + + """Total transfers to perform""" + totalTransfers: Float + + """Time spent transferring in seconds""" + transferTime: Float + + """Number of transfers completed""" + transfers: Float + + """Currently transferring files""" + transferring: JSON + + """Currently checking files""" + checking: JSON + + """Human-readable bytes transferred""" + formattedBytes: String + + """Human-readable transfer speed""" + formattedSpeed: String + + """Human-readable elapsed time""" + formattedElapsedTime: String + + """Human-readable ETA""" + formattedEta: String + + """Calculated percentage (fallback when percentage is null)""" + calculatedPercentage: Float + + """Whether the job is actively running""" + isActivelyRunning: Boolean + + """Whether the job is completed""" + isCompleted: Boolean +} + type ArrayMutations { """Set array state""" setState(input: ArrayStateInput!): UnraidArray! @@ -764,6 +914,186 @@ type VmMutations { reset(id: PrefixedID!): Boolean! } +"""Backup related mutations""" +type BackupMutations { + """Create a new backup job configuration""" + createBackupJobConfig(input: CreateBackupJobConfigInput!): BackupJobConfig! + + """Update a backup job configuration""" + updateBackupJobConfig(id: PrefixedID!, input: UpdateBackupJobConfigInput!): BackupJobConfig + + """Delete a backup job configuration""" + deleteBackupJobConfig(id: PrefixedID!): Boolean! + + """Initiates a backup using a configured remote.""" + initiateBackup(input: InitiateBackupInput!): BackupStatus! + + """Toggle a backup job configuration enabled/disabled""" + toggleJobConfig(id: PrefixedID!): BackupJobConfig + + """Manually trigger a backup job using existing configuration""" + triggerJob(id: PrefixedID!): BackupStatus! + + """Stop all running backup jobs""" + stopAllBackupJobs: BackupStatus! + + """Stop a specific backup job""" + stopBackupJob(id: PrefixedID!): BackupStatus! + + """Forget all finished backup jobs to clean up the job list""" + forgetFinishedBackupJobs: BackupStatus! +} + +input CreateBackupJobConfigInput { + name: String! + schedule: String + enabled: Boolean! = true + + """Source configuration for this backup job""" + sourceConfig: SourceConfigInput + + """Destination configuration for this backup job""" + destinationConfig: DestinationConfigInput +} + +input SourceConfigInput { + type: SourceType! + + """Timeout for backup operation in seconds""" + timeout: Float! = 3600 + + """Whether to cleanup on failure""" + cleanupOnFailure: Boolean! 
= true + zfsConfig: ZfsPreprocessConfigInput + flashConfig: FlashPreprocessConfigInput + scriptConfig: ScriptPreprocessConfigInput + rawConfig: RawBackupConfigInput +} + +""" +Type of backup to perform (ZFS snapshot, Flash backup, Custom script, or Raw file backup) +""" +enum SourceType { + ZFS + FLASH + SCRIPT + RAW +} + +input ZfsPreprocessConfigInput { + """Human-readable label for this source configuration""" + label: String + + """ZFS pool name""" + poolName: String! + + """Dataset name within the pool""" + datasetName: String! + + """Snapshot name prefix""" + snapshotPrefix: String + + """Whether to cleanup snapshots after backup""" + cleanupSnapshots: Boolean! = true + + """Number of snapshots to retain""" + retainSnapshots: Float +} + +input FlashPreprocessConfigInput { + """Human-readable label for this source configuration""" + label: String + + """Flash drive mount path""" + flashPath: String! = "/boot" + + """Whether to include git history""" + includeGitHistory: Boolean! = true + + """Additional paths to include in backup""" + additionalPaths: [String!] +} + +input ScriptPreprocessConfigInput { + """Human-readable label for this source configuration""" + label: String + + """Path to the script file""" + scriptPath: String! + + """Arguments to pass to the script""" + scriptArgs: [String!] + + """Working directory for script execution""" + workingDirectory: String + + """Environment variables for script execution""" + environment: JSON + + """Output file path where script should write data""" + outputPath: String! +} + +input RawBackupConfigInput { + """Human-readable label for this source configuration""" + label: String + + """Source path to backup""" + sourcePath: String! + + """File patterns to exclude from backup""" + excludePatterns: [String!] + + """File patterns to include in backup""" + includePatterns: [String!] +} + +input DestinationConfigInput { + type: DestinationType! + rcloneConfig: RcloneDestinationConfigInput +} + +enum DestinationType { + RCLONE +} + +input RcloneDestinationConfigInput { + remoteName: String! + destinationPath: String! + rcloneOptions: JSON +} + +input UpdateBackupJobConfigInput { + name: String + schedule: String + enabled: Boolean + + """Source configuration for this backup job""" + sourceConfig: SourceConfigInput + + """Destination configuration for this backup job""" + destinationConfig: DestinationConfigInput + lastRunStatus: String + lastRunAt: String + currentJobId: String +} + +input InitiateBackupInput { + """The name of the remote configuration to use for the backup.""" + remoteName: String! + + """Source path to backup.""" + sourcePath: String! + + """Destination path on the remote.""" + destinationPath: String! + + """ + Additional options for the backup operation, such as --dry-run or --transfers. + """ + options: JSON +} + """API Key related mutations""" type ApiKeyMutations { """Create an API key""" @@ -886,10 +1216,125 @@ type ParityCheck { running: Boolean } +type FlashPreprocessConfig { + label: String! + flashPath: String! + includeGitHistory: Boolean! + additionalPaths: [String!] +} + +type RawBackupConfig { + label: String! + sourcePath: String! + excludePatterns: [String!] + includePatterns: [String!] +} + +type ScriptPreprocessConfig { + label: String! + scriptPath: String! + scriptArgs: [String!] + workingDirectory: String + environment: JSON + outputPath: String! +} + +type ZfsPreprocessConfig { + label: String! + poolName: String! + datasetName: String! + snapshotPrefix: String + cleanupSnapshots: Boolean! 
+ retainSnapshots: Float +} + +type Backup implements Node { + id: PrefixedID! + jobs: [JobStatus!]! + configs: [BackupJobConfig!]! + + """Get the status for the backup service""" + status: BackupStatus! +} + +type BackupStatus { + """Status message indicating the outcome of the backup initiation.""" + status: String! + + """Job ID if available, can be used to check job status.""" + jobId: String +} + +type BackupJobConfig implements Node { + id: PrefixedID! + + """Human-readable name for this backup job""" + name: String! + + """Type of the backup source""" + sourceType: SourceType! + + """Type of the backup destination""" + destinationType: DestinationType! + + """Cron schedule expression (e.g., "0 2 * * *" for daily at 2AM)""" + schedule: String! + + """Whether this backup job is enabled""" + enabled: Boolean! + + """Source configuration for this backup job""" + sourceConfig: SourceConfigUnion! + + """Destination configuration for this backup job""" + destinationConfig: DestinationConfigUnion! + + """When this config was created""" + createdAt: DateTimeISO! + + """When this config was last updated""" + updatedAt: DateTimeISO! + + """Last time this job ran""" + lastRunAt: DateTimeISO + + """Status of last run""" + lastRunStatus: String + + """Current running job ID for this config""" + currentJobId: String + + """Get the current running job for this backup config""" + currentJob: JobStatus +} + +union SourceConfigUnion = ZfsPreprocessConfig | FlashPreprocessConfig | ScriptPreprocessConfig | RawBackupConfig + +union DestinationConfigUnion = RcloneDestinationConfig + +type RcloneDestinationConfig { + type: String! + + """Remote name from rclone config""" + remoteName: String! + + """Destination path on the remote""" + destinationPath: String! + + """RClone options (e.g., --transfers, --checkers)""" + rcloneOptions: JSON +} + """ -A date-time string at UTC, such as 2019-12-03T09:54:33Z, compliant with the date-time format. +A date-time string at UTC, such as 2007-12-03T10:15:30Z, compliant with the `date-time` format outlined in section 5.6 of the RFC 3339 profile of the ISO 8601 standard for representation of dates and times using the Gregorian calendar.This scalar is serialized to a string in ISO 8601 format and parsed from a string in ISO 8601 format. """ -scalar DateTime +scalar DateTimeISO + +type BackupJobConfigForm { + id: PrefixedID! + dataSchema: JSON! + uiSchema: JSON! +} type Config implements Node { id: PrefixedID! @@ -1248,14 +1693,6 @@ type Docker implements Node { networks(skipCache: Boolean! = false): [DockerNetwork!]! } -type FlashBackupStatus { - """Status message indicating the outcome of the backup initiation.""" - status: String! - - """Job ID if available, can be used to check job status.""" - jobId: String -} - type Flash implements Node { id: PrefixedID! guid: String! @@ -1658,13 +2095,27 @@ type Query { vms: Vms! parityHistory: [ParityCheck!]! array: UnraidArray! + + """Get backup service information""" + backup: Backup! + + """Get a specific backup job configuration""" + backupJobConfig(id: PrefixedID!): BackupJobConfig + + """Get status of a specific backup job""" + backupJob(id: PrefixedID!): JobStatus + + """Get the JSON schema for backup job configuration form""" + backupJobConfigForm(input: BackupJobConfigFormInput): BackupJobConfigForm! + backupJobStatus(jobId: PrefixedID!): JobStatus + allBackupJobStatuses: [JobStatus!]! + rclone: RCloneBackupSettings! customization: Customization publicPartnerInfo: PublicPartnerInfo publicTheme: Theme! docker: Docker! 
disks: [Disk!]! disk(id: PrefixedID!): Disk! - rclone: RCloneBackupSettings! settings: Settings! isSSOEnabled: Boolean! @@ -1676,6 +2127,10 @@ type Query { cloud: Cloud! } +input BackupJobConfigFormInput { + showAdvanced: Boolean! = false +} + type Mutation { """Creates a new notification record""" createNotification(input: NotificationData!): Notification! @@ -1699,12 +2154,10 @@ type Mutation { array: ArrayMutations! docker: DockerMutations! vm: VmMutations! + backup: BackupMutations! parityCheck: ParityCheckMutations! apiKey: ApiKeyMutations! rclone: RCloneMutations! - - """Initiates a flash drive backup using a configured remote.""" - initiateFlashBackup(input: InitiateFlashBackupInput!): FlashBackupStatus! updateSettings(input: JSON!): UpdateSettingsResponse! """ @@ -1731,22 +2184,6 @@ input NotificationData { link: String } -input InitiateFlashBackupInput { - """The name of the remote configuration to use for the backup.""" - remoteName: String! - - """Source path to backup (typically the flash drive).""" - sourcePath: String! - - """Destination path on the remote.""" - destinationPath: String! - - """ - Additional options for the backup operation, such as --dry-run or --transfers. - """ - options: JSON -} - input PluginManagementInput { """Array of plugin package names to add or remove""" names: [String!]! diff --git a/api/package.json b/api/package.json index 8804223a19..25ca3c9a7b 100644 --- a/api/package.json +++ b/api/package.json @@ -94,7 +94,7 @@ "command-exists": "1.2.9", "convert": "5.12.0", "cookie": "1.0.2", - "cron": "4.3.1", + "cron": "4.3.0", "cross-fetch": "4.1.0", "diff": "8.0.2", "dockerode": "4.0.7", @@ -192,6 +192,7 @@ "@types/wtfnode": "0.7.3", "@vitest/coverage-v8": "3.2.4", "@vitest/ui": "3.2.4", + "commit-and-tag-version": "9.6.0", "cz-conventional-changelog": "3.3.0", "eslint": "9.30.1", "eslint-plugin-import": "2.32.0", @@ -203,7 +204,6 @@ "nodemon": "3.1.10", "prettier": "3.6.2", "rollup-plugin-node-externals": "8.0.1", - "commit-and-tag-version": "9.6.0", "tsx": "4.20.3", "type-fest": "4.41.0", "typescript": "5.8.3", @@ -225,7 +225,8 @@ "nest-authz": { "@nestjs/common": "$@nestjs/common", "@nestjs/core": "$@nestjs/core" - } + }, + "cron": "4.3.1" }, "private": true, "packageManager": "pnpm@10.13.1" diff --git a/api/src/__test__/graphql/resolvers/rclone-api.service.test.ts b/api/src/__test__/graphql/resolvers/rclone-api.service.test.ts index a9c7415360..0dfb6fe524 100644 --- a/api/src/__test__/graphql/resolvers/rclone-api.service.test.ts +++ b/api/src/__test__/graphql/resolvers/rclone-api.service.test.ts @@ -11,6 +11,7 @@ import { RCloneStartBackupInput, UpdateRCloneRemoteDto, } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; +import { FormatService } from '@app/unraid-api/utils/format.service.js'; vi.mock('got'); vi.mock('execa'); @@ -55,6 +56,8 @@ describe('RCloneApiService', () => { let mockExeca: any; let mockPRetry: any; let mockExistsSync: any; + let mockFormatService: FormatService; + let mockCacheManager: any; beforeEach(async () => { vi.clearAllMocks(); @@ -69,18 +72,67 @@ describe('RCloneApiService', () => { mockPRetry = vi.mocked(pRetry.default); mockExistsSync = vi.mocked(existsSync); - mockGot.post = vi.fn().mockResolvedValue({ body: {} }); - mockExeca.mockReturnValue({ - on: vi.fn(), - kill: vi.fn(), - killed: false, - pid: 12345, - } as any); + mockGot.post = vi.fn().mockImplementation((url: string) => { + // Mock the core/pid call to indicate socket is running + if (url.includes('core/pid')) { + return Promise.resolve({ body: { 
pid: 12345 } }); + } + return Promise.resolve({ body: {} }); + }); + // Mock execa to return a resolved promise for rclone version check + mockExeca.mockImplementation((cmd: string, args: string[]) => { + if (cmd === 'rclone' && args[0] === 'version') { + return Promise.resolve({ stdout: 'rclone v1.67.0', stderr: '', exitCode: 0 } as any); + } + return { + on: vi.fn(), + kill: vi.fn(), + killed: false, + pid: 12345, + } as any; + }); mockPRetry.mockResolvedValue(undefined); - mockExistsSync.mockReturnValue(false); - - service = new RCloneApiService(); - await service.onModuleInit(); + // Mock socket exists + mockExistsSync.mockReturnValue(true); + + mockFormatService = { + formatBytes: vi.fn(), + formatDuration: vi.fn(), + } as any; + + // Mock RCloneStatusService + const mockStatusService = { + enhanceStatsWithFormattedFields: vi.fn(), + transformStatsToJob: vi.fn(), + calculateCombinedStats: vi.fn(), + parseActiveJobs: vi.fn(), + parseBackupStatus: vi.fn(), + parseJobWithStats: vi.fn(), + parseAllJobsWithStats: vi.fn(), + parseJobsWithStats: vi.fn(), + getBackupStatus: vi.fn(), + } as any; + + // Mock StreamingJobManager + const mockStreamingJobManager = { + startJob: vi.fn(), + stopJob: vi.fn(), + getJobStatus: vi.fn(), + getAllJobs: vi.fn(), + } as any; + + // Mock cache manager + mockCacheManager = { + get: vi.fn().mockResolvedValue(null), + set: vi.fn().mockResolvedValue(undefined), + del: vi.fn().mockResolvedValue(undefined), + }; + + service = new RCloneApiService(mockStatusService); + // Mock the service as initialized without actually running onModuleInit + // to avoid the initialization API calls + (service as any).initialized = true; + (service as any).rcloneBaseUrl = 'http://unix:/tmp/rclone.sock:'; }); describe('getProviders', () => { @@ -248,6 +300,9 @@ describe('RCloneApiService', () => { options: { delete_on: 'dst' }, }; const mockResponse = { jobid: 'job-123' }; + + // Clear previous mock calls and set up fresh mock + mockGot.post.mockClear(); mockGot.post.mockResolvedValue({ body: mockResponse }); const result = await service.startBackup(input); @@ -256,11 +311,11 @@ describe('RCloneApiService', () => { expect(mockGot.post).toHaveBeenCalledWith( 'http://unix:/tmp/rclone.sock:/sync/copy', expect.objectContaining({ - json: { + json: expect.objectContaining({ srcFs: '/source/path', dstFs: 'remote:backup/path', delete_on: 'dst', - }, + }), }) ); }); @@ -269,8 +324,22 @@ describe('RCloneApiService', () => { describe('getJobStatus', () => { it('should return job status', async () => { const input: GetRCloneJobStatusDto = { jobId: 'job-123' }; - const mockStatus = { status: 'running', progress: 0.5 }; - mockGot.post.mockResolvedValue({ body: mockStatus }); + const mockStatus = { id: 'job-123', status: 'running', progress: 0.5 }; + mockGot.post.mockImplementation((url: string) => { + if (url.includes('core/stats')) { + return Promise.resolve({ body: {} }); + } + if (url.includes('job/status')) { + return Promise.resolve({ body: mockStatus }); + } + return Promise.resolve({ body: {} }); + }); + + // Mock the status service methods + const mockStatusService = (service as any).statusService; + mockStatusService.enhanceStatsWithFormattedFields = vi.fn().mockReturnValue({}); + mockStatusService.transformStatsToJob = vi.fn().mockReturnValue(null); + mockStatusService.parseJobWithStats = vi.fn().mockReturnValue(mockStatus); const result = await service.getJobStatus(input); @@ -335,7 +404,7 @@ describe('RCloneApiService', () => { mockGot.post.mockRejectedValue(httpError); await 
expect(service.getProviders()).rejects.toThrow( - 'Rclone API Error (config/providers, HTTP 404): Failed to process error response body. Raw body:' + 'Rclone API Error (config/providers, HTTP 404): Failed to process error response: ' ); }); @@ -352,7 +421,7 @@ describe('RCloneApiService', () => { mockGot.post.mockRejectedValue(httpError); await expect(service.getProviders()).rejects.toThrow( - 'Rclone API Error (config/providers, HTTP 400): Failed to process error response body. Raw body: invalid json' + 'Rclone API Error (config/providers, HTTP 400): Failed to process error response: invalid json' ); }); @@ -367,7 +436,7 @@ describe('RCloneApiService', () => { mockGot.post.mockRejectedValue('unknown error'); await expect(service.getProviders()).rejects.toThrow( - 'Unknown error calling RClone API (config/providers) with params {}: unknown error' + 'Unknown error calling RClone API (config/providers): unknown error' ); }); }); diff --git a/api/src/__test__/store/modules/__snapshots__/paths.test.ts.snap b/api/src/__test__/store/modules/__snapshots__/paths.test.ts.snap index e34dd1e54f..c56def1632 100644 --- a/api/src/__test__/store/modules/__snapshots__/paths.test.ts.snap +++ b/api/src/__test__/store/modules/__snapshots__/paths.test.ts.snap @@ -31,6 +31,7 @@ exports[`Returns paths 1`] = ` "activationBase", "webGuiBase", "identConfig", + "backupBase", "activation", "boot", "webgui", diff --git a/api/src/store/modules/paths.ts b/api/src/store/modules/paths.ts index 3a70d38e7e..45ae09421b 100644 --- a/api/src/store/modules/paths.ts +++ b/api/src/store/modules/paths.ts @@ -71,6 +71,7 @@ const initialState = { ), webGuiBase: '/usr/local/emhttp/webGui' as const, identConfig: resolvePath(process.env.PATHS_IDENT_CONFIG ?? ('/boot/config/ident.cfg' as const)), + backupBase: resolvePath(process.env.PATHS_BACKUP_JOBS ?? ('/boot/config/api/backup/' as const)), }; // Derive asset paths from base paths diff --git a/api/src/unraid-api/auth/auth.service.ts b/api/src/unraid-api/auth/auth.service.ts index 01ca1a9717..8671a94507 100644 --- a/api/src/unraid-api/auth/auth.service.ts +++ b/api/src/unraid-api/auth/auth.service.ts @@ -75,7 +75,7 @@ export class AuthService { // Now get the updated roles const existingRoles = await this.authzService.getRolesForUser(user.id); - this.logger.debug(`User ${user.id} has roles: ${existingRoles}`); + this.logger.verbose(`User ${user.id} has roles: ${existingRoles}`); return user; } catch (error: unknown) { @@ -213,7 +213,7 @@ export class AuthService { ...rolesToRemove.map((role) => this.authzService.deleteRoleForUser(userId, role)), ]); - this.logger.debug( + this.logger.verbose( `Synced roles for user ${userId}. Added: ${rolesToAdd.join( ',' )}, Removed: ${rolesToRemove.join(',')}` @@ -234,7 +234,6 @@ export class AuthService { * @returns a service account that represents the user session (i.e. a webgui user). 
     */
    async getSessionUser(): Promise<UserAccount> {
-        this.logger.debug('getSessionUser called!');
         return {
             id: '-1',
             description: 'Session receives administrator permissions',
diff --git a/api/src/unraid-api/graph/resolvers/backup/backup-config.service.ts b/api/src/unraid-api/graph/resolvers/backup/backup-config.service.ts
new file mode 100644
index 0000000000..55b0120363
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/backup-config.service.ts
@@ -0,0 +1,600 @@
+import { forwardRef, Inject, Injectable, Logger, OnModuleInit } from '@nestjs/common';
+import { SchedulerRegistry } from '@nestjs/schedule';
+import { existsSync } from 'fs';
+import { readFile, writeFile } from 'fs/promises';
+import { join } from 'path';
+
+import { CronJob } from 'cron';
+import { v4 as uuidv4 } from 'uuid';
+
+import { getters } from '@app/store/index.js';
+import {
+    BackupJobConfig,
+    CreateBackupJobConfigInput,
+    UpdateBackupJobConfigInput,
+} from '@app/unraid-api/graph/resolvers/backup/backup.model.js';
+import { getBackupJobGroupId } from '@app/unraid-api/graph/resolvers/backup/backup.utils.js';
+import {
+    DestinationConfigInput,
+    DestinationType,
+    RcloneDestinationConfig,
+} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';
+import { BackupOrchestrationService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.js';
+import {
+    FlashPreprocessConfig,
+    RawBackupConfig,
+    ScriptPreprocessConfig,
+    SourceConfigInput,
+    SourceType,
+    ZfsPreprocessConfig,
+} from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
+import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js';
+
+const JOB_GROUP_PREFIX = 'backup-';
+
+@Injectable()
+export class BackupConfigService implements OnModuleInit {
+    private readonly logger = new Logger(BackupConfigService.name);
+    private readonly configPath: string;
+    private configs: Map<string, BackupJobConfig> = new Map();
+
+    constructor(
+        private readonly rcloneService: RCloneService,
+        private readonly schedulerRegistry: SchedulerRegistry,
+        @Inject(forwardRef(() => BackupOrchestrationService))
+        private readonly backupOrchestrationService: BackupOrchestrationService
+    ) {
+        const paths = getters.paths();
+        this.configPath = join(paths.backupBase, 'backup-jobs.json');
+    }
+
+    async onModuleInit(): Promise<void> {
+        await this.loadConfigs();
+    }
+
+    private transformSourceConfigInput(
+        input: SourceConfigInput
+    ): ZfsPreprocessConfig | FlashPreprocessConfig | ScriptPreprocessConfig | RawBackupConfig {
+        switch (input.type) {
+            case SourceType.ZFS: {
+                if (!input.zfsConfig) {
+                    throw new Error('ZFS configuration is required when type is ZFS');
+                }
+                const zfsConfig = new ZfsPreprocessConfig();
+                zfsConfig.label = input.zfsConfig.label || 'ZFS backup';
+                zfsConfig.poolName = input.zfsConfig.poolName;
+                zfsConfig.datasetName = input.zfsConfig.datasetName;
+                zfsConfig.snapshotPrefix = input.zfsConfig.snapshotPrefix;
+                zfsConfig.cleanupSnapshots = input.zfsConfig.cleanupSnapshots ?? true;
+                zfsConfig.retainSnapshots = input.zfsConfig.retainSnapshots;
+                return zfsConfig;
+            }
+
+            case SourceType.FLASH: {
+                if (!input.flashConfig) {
+                    throw new Error('Flash configuration is required when type is FLASH');
+                }
+                const flashConfig = new FlashPreprocessConfig();
+                flashConfig.label = input.flashConfig.label || 'Flash drive backup';
+                flashConfig.flashPath = input.flashConfig.flashPath || '/boot';
+                flashConfig.includeGitHistory = input.flashConfig.includeGitHistory ?? true;
+                flashConfig.additionalPaths = input.flashConfig.additionalPaths || [];
+                return flashConfig;
+            }
+
+            case SourceType.SCRIPT: {
+                if (!input.scriptConfig) {
+                    throw new Error('Script configuration is required when type is SCRIPT');
+                }
+                const scriptConfig = new ScriptPreprocessConfig();
+                scriptConfig.label = input.scriptConfig.label || 'Script backup';
+                scriptConfig.scriptPath = input.scriptConfig.scriptPath;
+                scriptConfig.scriptArgs = input.scriptConfig.scriptArgs || [];
+                scriptConfig.workingDirectory = input.scriptConfig.workingDirectory;
+                scriptConfig.environment = input.scriptConfig.environment;
+                scriptConfig.outputPath = input.scriptConfig.outputPath;
+                return scriptConfig;
+            }
+
+            case SourceType.RAW: {
+                if (!input.rawConfig) {
+                    throw new Error('Raw configuration is required when type is RAW');
+                }
+                const rawConfig = new RawBackupConfig();
+                rawConfig.label = input.rawConfig.label || 'Raw file backup';
+                rawConfig.sourcePath = input.rawConfig.sourcePath;
+                rawConfig.excludePatterns = input.rawConfig.excludePatterns || [];
+                rawConfig.includePatterns = input.rawConfig.includePatterns || [];
+                return rawConfig;
+            }
+
+            default:
+                throw new Error(`Unsupported source type: ${input.type}`);
+        }
+    }
+
+    private transformDestinationConfigInput(input: DestinationConfigInput): RcloneDestinationConfig {
+        switch (input.type) {
+            case DestinationType.RCLONE: {
+                if (!input.rcloneConfig) {
+                    throw new Error('RClone configuration is required when type is RCLONE');
+                }
+                const rcloneConfig = new RcloneDestinationConfig();
+                rcloneConfig.type = 'RCLONE';
+                rcloneConfig.remoteName = input.rcloneConfig.remoteName;
+                rcloneConfig.destinationPath = input.rcloneConfig.destinationPath;
+                rcloneConfig.rcloneOptions = input.rcloneConfig.rcloneOptions;
+                return rcloneConfig;
+            }
+
+            default:
+                throw new Error(`Unsupported destination type: ${input.type}`);
+        }
+    }
+
+    async createBackupJobConfig(input: CreateBackupJobConfigInput): Promise<BackupJobConfig> {
+        const id = uuidv4();
+        const now = new Date().toISOString();
+
+        // Validate input sourceConfig and destinationConfig presence
+        if (!input.sourceConfig) {
+            this.logger.error('Source configuration (sourceConfig) is required.');
+            throw new Error('Source configuration (sourceConfig) is required.');
+        }
+        if (!input.destinationConfig) {
+            this.logger.error('Destination configuration (destinationConfig) is required.');
+            throw new Error('Destination configuration (destinationConfig) is required.');
+        }
+
+        // Extract sourceType and destinationType from the respective config objects
+        const sourceType = input.sourceConfig.type;
+        const destinationType = input.destinationConfig.type;
+
+        if (!sourceType) {
+            this.logger.error("Source configuration must include a valid 'type' property.");
+            throw new Error("Source configuration must include a valid 'type' property.");
+        }
+        if (!destinationType) {
+            this.logger.error("Destination configuration must include a valid 'type' property.");
+            throw new Error("Destination configuration must include a valid 'type' property.");
+        }
+
+        // Transform the source config input into the appropriate union member
+        const transformedSourceConfig = this.transformSourceConfigInput(input.sourceConfig);
+
+        // Transform the destination config input into the appropriate union member
+        const transformedDestinationConfig = this.transformDestinationConfigInput(
+            input.destinationConfig
+        );
+
+        const config: BackupJobConfig = {
+            id,
+            name: input.name,
+            sourceType,
+            destinationType,
+            schedule: input.schedule || '0 2 * * *',
+            enabled: input.enabled,
+            sourceConfig: transformedSourceConfig,
+            destinationConfig: transformedDestinationConfig,
+            createdAt: now,
+            updatedAt: now,
+        };
+
+        this.configs.set(id, config);
+        await this.saveConfigs();
+
+        if (config.enabled) {
+            this.scheduleJob(config);
+        }
+
+        return config;
+    }
+
+    async updateBackupJobConfig(
+        id: string,
+        input: UpdateBackupJobConfigInput
+    ): Promise<BackupJobConfig | null> {
+        this.logger.debug(
+            `[updateBackupJobConfig] Called with ID: ${id}, Input: ${JSON.stringify(input)}`
+        );
+        const existing = this.configs.get(id);
+        if (!existing) {
+            this.logger.warn(`[updateBackupJobConfig] No existing config found for ID: ${id}`);
+            return null;
+        }
+        this.logger.debug(
+            `[updateBackupJobConfig] Existing config for ID ${id}: ${JSON.stringify(existing)}`
+        );
+
+        // Handle sourceConfig update
+        let updatedSourceConfig = existing.sourceConfig;
+        let updatedSourceType = existing.sourceType;
+        if (input.sourceConfig) {
+            const inputSourceType = input.sourceConfig.type;
+            if (!inputSourceType) {
+                this.logger.warn(
+                    `[updateBackupJobConfig] Source config update for ID ${id} is missing 'type'. Update skipped for sourceConfig.`
+                );
+            } else {
+                // Transform the input into the appropriate union member
+                updatedSourceConfig = this.transformSourceConfigInput(input.sourceConfig);
+                updatedSourceType = inputSourceType;
+                this.logger.debug(`[updateBackupJobConfig] Transformed sourceConfig for ${id}.`);
+            }
+        }
+
+        // Handle destinationConfig update
+        let updatedDestinationConfig = existing.destinationConfig;
+        let updatedDestinationType = existing.destinationType;
+        if (input.destinationConfig) {
+            const inputDestinationType = input.destinationConfig.type;
+            if (!inputDestinationType) {
+                this.logger.warn(
+                    `[updateBackupJobConfig] Destination config update for ID ${id} is missing 'type'. Update skipped for destinationConfig.`
+                );
+            } else {
+                // Transform the input into the appropriate union member
+                updatedDestinationConfig = this.transformDestinationConfigInput(input.destinationConfig);
+                updatedDestinationType = inputDestinationType;
+                this.logger.debug(`[updateBackupJobConfig] Updated destinationConfig for ${id}.`);
+            }
+        }
+
+        const updated: BackupJobConfig = {
+            ...existing,
+            name: input.name ?? existing.name,
+            schedule: input.schedule ?? existing.schedule,
+            enabled: input.enabled ?? existing.enabled,
+            sourceType: updatedSourceType,
+            destinationType: updatedDestinationType,
+            sourceConfig: updatedSourceConfig,
+            destinationConfig: updatedDestinationConfig,
+            updatedAt: new Date().toISOString(),
+            lastRunAt: input.lastRunAt !== undefined ? input.lastRunAt : existing.lastRunAt,
+            lastRunStatus:
+                input.lastRunStatus !== undefined ? input.lastRunStatus : existing.lastRunStatus,
+        };
+
+        this.logger.debug(
+            `[updateBackupJobConfig] Updated object for ID ${id} (before set): ${JSON.stringify(updated)}`
+        );
+
+        this.configs.set(id, updated);
+        await this.saveConfigs();
+        this.logger.debug(`[updateBackupJobConfig] Configs saved for ID: ${id}`);
+
+        this.unscheduleJob(id);
+        if (updated.enabled) {
+            this.scheduleJob(updated);
+        }
+
+        return updated;
+    }
+
+    async deleteBackupJobConfig(id: string): Promise<boolean> {
+        const config = this.configs.get(id);
+        if (!config) return false;
+
+        this.unscheduleJob(id);
+        this.configs.delete(id);
+        await this.saveConfigs();
+        return true;
+    }
+
+    async getBackupJobConfig(id: string): Promise<BackupJobConfig | null> {
+        this.logger.debug(`[getBackupJobConfig] Called for ID: ${id}`);
+        const config = this.configs.get(id);
+        if (config) {
+            this.logger.debug(
+                `[getBackupJobConfig] Found config for ID ${id}: ${JSON.stringify(config)}`
+            );
+        } else {
+            this.logger.warn(`[getBackupJobConfig] No config found for ID: ${id}`);
+        }
+        return config || null;
+    }
+
+    async getAllBackupJobConfigs(): Promise<BackupJobConfig[]> {
+        return Array.from(this.configs.values());
+    }
+
+    private transformPlainObjectToSourceConfig(
+        obj: any,
+        sourceType: SourceType
+    ): ZfsPreprocessConfig | FlashPreprocessConfig | ScriptPreprocessConfig | RawBackupConfig {
+        switch (sourceType) {
+            case SourceType.ZFS: {
+                const zfsConfig = new ZfsPreprocessConfig();
+                Object.assign(zfsConfig, obj);
+                return zfsConfig;
+            }
+            case SourceType.FLASH: {
+                const flashConfig = new FlashPreprocessConfig();
+                Object.assign(flashConfig, obj);
+                return flashConfig;
+            }
+            case SourceType.SCRIPT: {
+                const scriptConfig = new ScriptPreprocessConfig();
+                Object.assign(scriptConfig, obj);
+                return scriptConfig;
+            }
+            case SourceType.RAW: {
+                const rawConfig = new RawBackupConfig();
+                Object.assign(rawConfig, obj);
+                return rawConfig;
+            }
+            default:
+                this.logger.error(
+                    `Unsupported source type encountered during plain object transformation: ${sourceType as string}`
+                );
+                throw new Error(`Unsupported source type: ${sourceType as string}`);
+        }
+    }
+
+    private transformPlainObjectToDestinationConfig(
+        obj: any,
+        destinationType: DestinationType
+    ): RcloneDestinationConfig {
+        switch (destinationType) {
+            case DestinationType.RCLONE: {
+                const rcloneConfig = new RcloneDestinationConfig();
+                Object.assign(rcloneConfig, obj);
+                return rcloneConfig;
+            }
+
+            default:
+                throw new Error(`Unsupported destination type: ${destinationType}`);
+        }
+    }
+
+    private async executeBackupJob(config: BackupJobConfig): Promise<void> {
+        this.logger.log(
+            `Executing backup job via BackupOrchestrationService: ${config.name} (ID: ${config.id})`
+        );
+
+        // Prepare updates; currentJobId will be set after the job starts
+        const updatesForInMemoryConfig: Partial<BackupJobConfig> = {
+            lastRunAt: new Date().toISOString(),
+            lastRunStatus: 'Starting...',
+            currentJobId: undefined, // Initialize
+        };
+
+        try {
+            // Delegate to the BackupOrchestrationService and get the jobId.
+            // IMPORTANT: This assumes backupOrchestrationService.executeBackupJob is modified to return the jobId string
+            const jobId = await this.backupOrchestrationService.executeBackupJob(config, config.id);
+
+            if (jobId) {
+                updatesForInMemoryConfig.currentJobId = jobId;
+                this.logger.log(
+                    `Backup job ${config.name} (ID: ${config.id}) initiated by BackupOrchestrationService with Job ID: ${jobId}.`
+                );
+            } else {
+                this.logger.warn(
+                    `BackupOrchestrationService.executeBackupJob did not return a jobId for config ${config.id}. currentJobId will not be set.`
+                );
+            }
+
+            // Update the in-memory config with all changes, including currentJobId
+            const currentConfig = this.configs.get(config.id);
+            if (currentConfig) {
+                this.configs.set(config.id, {
+                    ...currentConfig,
+                    ...updatesForInMemoryConfig,
+                });
+            } else {
+                this.logger.warn(
+                    `Config ${config.id} not found in memory map after starting job. State may be inconsistent.`
+                );
+                // Fallback: attempt to set it anyway, though this indicates a potential issue
+                this.configs.set(config.id, {
+                    ...config, // Use the passed config as a base
+                    ...updatesForInMemoryConfig,
+                });
+            }
+
+            // currentJobId is transient state: it is kept on the in-memory config so the
+            // GraphQL resolver can surface the running job, but it is never persisted.
+            // saveConfigs() strips currentJobId from every config before writing
+            // backup-jobs.json, so only lastRunAt and lastRunStatus reach the disk.
+            // Persist lastRunAt and lastRunStatus
+            await this.saveConfigs();
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            this.logger.error(
+                `Backup job ${config.name} (ID: ${config.id}) failed during orchestration: ${errorMessage}`,
+                (error as Error).stack
+            );
+
+            const currentConfig = this.configs.get(config.id);
+            const failedConfigUpdate = {
+                lastRunAt: new Date().toISOString(),
+                lastRunStatus: `Failed: ${errorMessage}`,
+                currentJobId: undefined, // Clear currentJobId on failure
+            };
+
+            if (currentConfig) {
+                this.configs.set(config.id, {
+                    ...currentConfig,
+                    ...failedConfigUpdate,
+                });
+            } else {
+                // If not in map, use passed config as base
+                this.configs.set(config.id, {
+                    ...config,
+                    ...failedConfigUpdate,
+                });
+            }
+            await this.saveConfigs(); // Save updated status; currentJobId will be cleared
+            throw error;
+        }
+    }
+
+    // Called when a job completes or is stopped
+    public async handleJobCompletion(
+        configId: string,
+        finalStatus: string,
+        jobId?: string
+    ): Promise<void> {
+        const config = this.configs.get(configId);
+        if (config) {
+            this.logger.log(
+                `Handling job completion for config ${configId}, job ${jobId}. Final status: ${finalStatus}`
+            );
+
+            const updates: Partial<BackupJobConfig> = {
+                lastRunStatus: finalStatus,
+                lastRunAt: new Date().toISOString(), // Update lastRunAt to completion time
+            };
+
+            // Only clear currentJobId if it matches the completed/stopped job
+            if (config.currentJobId === jobId) {
+                updates.currentJobId = undefined;
+            } else if (jobId && config.currentJobId) {
+                this.logger.warn(
+                    `Completed job ID ${jobId} does not match currentJobId ${config.currentJobId} for config ${configId}. currentJobId not cleared.`
+                );
+            }
+
+            this.configs.set(configId, {
+                ...config,
+                ...updates,
+            });
+
+            // Persist the final status; saveConfigs strips any remaining currentJobId.
+            await this.saveConfigs();
+        } else {
+            this.logger.warn(`Config ${configId} not found when trying to handle job completion.`);
+        }
+    }
+
+    private scheduleJob(config: BackupJobConfig): void {
+        try {
+            const job = new CronJob(
+                config.schedule,
+                () => this.executeBackupJob(config),
+                null,
+                false,
+                'UTC'
+            );
+
+            this.schedulerRegistry.addCronJob(getBackupJobGroupId(config.id), job);
+            job.start();
+            this.logger.log(`Scheduled backup job: ${config.name} with schedule: ${config.schedule}`);
+        } catch (error) {
+            this.logger.error(`Failed to schedule backup job ${config.name}:`, error);
+        }
+    }
+
+    private unscheduleJob(id: string): void {
+        try {
+            const jobName = getBackupJobGroupId(id);
+            if (this.schedulerRegistry.doesExist('cron', jobName)) {
+                this.schedulerRegistry.deleteCronJob(jobName);
+                this.logger.log(`Unscheduled backup job: ${id}`);
+            } else {
+                this.logger.debug(`No existing cron job found to unschedule for backup job: ${id}`);
+            }
+        } catch (error) {
+            this.logger.error(`Failed to unschedule backup job ${id}:`, error);
+        }
+    }
+
+    private async loadConfigs(): Promise<void> {
+        try {
+            if (existsSync(this.configPath)) {
+                const data = await readFile(this.configPath, 'utf-8');
+                const configs: BackupJobConfig[] = JSON.parse(data);
+
+                // First, unschedule any existing jobs before clearing the config map
+                this.configs.forEach((config) => {
+                    if (config.enabled) {
+                        this.unscheduleJob(config.id);
+                    }
+                });
+
+                this.configs.clear();
+                configs.forEach((config) => {
+                    // Transform plain objects back into class instances
+                    const transformedConfig = {
+                        ...config,
+                        sourceConfig: this.transformPlainObjectToSourceConfig(
+                            config.sourceConfig,
+                            config.sourceType
+                        ),
+                        destinationConfig: this.transformPlainObjectToDestinationConfig(
+                            config.destinationConfig,
+                            config.destinationType
+                        ),
+                    };
+
+                    this.configs.set(config.id, transformedConfig);
+                    if (transformedConfig.enabled) {
+                        this.scheduleJob(transformedConfig);
+                    }
+                });
+
+                this.logger.log(`Loaded ${configs.length} backup job configurations`);
+            }
+        } catch (error) {
+            this.logger.error('Failed to load backup configurations:', error);
+        }
+    }
+
+    private async saveConfigs(): Promise<void> {
+        try {
+            // Create a copy of the configs for saving, stripping the transient currentJobId
+            const configsToSave: BackupJobConfig[] = [];
+            for (const config of this.configs.values()) {
+                const { currentJobId, ...restOfConfig } = config; // Destructure to remove currentJobId
+                configsToSave.push(restOfConfig as BackupJobConfig);
+            }
+            await writeFile(this.configPath, JSON.stringify(configsToSave, null, 2));
+        } catch (error) {
+            this.logger.error('Failed to save backup configurations:', error);
+        }
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/backup-mutations.resolver.ts b/api/src/unraid-api/graph/resolvers/backup/backup-mutations.resolver.ts
new file mode 100644
index 0000000000..e3efd4af5a
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/backup-mutations.resolver.ts
@@ -0,0 +1,313 @@
+import { Logger } from '@nestjs/common';
+import { Args, ResolveField, Resolver } from '@nestjs/graphql';
+
+import { Resource } from '@unraid/shared/graphql.model.js';
+import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js';
+import {
+    AuthActionVerb,
+    AuthPossession,
+    UsePermissions,
+} from '@unraid/shared/use-permissions.directive.js';
+
+import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js';
+import {
+    BackupJobConfig,
+    BackupStatus,
+    CreateBackupJobConfigInput,
+    InitiateBackupInput,
+    UpdateBackupJobConfigInput,
+} from '@app/unraid-api/graph/resolvers/backup/backup.model.js';
+import { BackupOrchestrationService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.js';
+import { BackupMutations } from '@app/unraid-api/graph/resolvers/mutation/mutation.model.js';
+import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js';
+
+@Resolver(() => BackupMutations)
+export class BackupMutationsResolver {
+    private readonly logger = new Logger(BackupMutationsResolver.name);
+
+    constructor(
+        private readonly backupConfigService: BackupConfigService,
+        private readonly rcloneService: RCloneService,
+        private readonly backupOrchestrationService: BackupOrchestrationService
+    ) {}
+
+    private async executeBackup(
+        sourcePath: string,
+        remoteName: string,
+        destinationPath: string,
+        options: Record<string, unknown> = {},
+        configId?: string
+    ): Promise<BackupStatus> {
+        try {
+            this.logger.log(`Executing backup: ${sourcePath} -> ${remoteName}:${destinationPath}`);
+
+            // Create a temporary config for the orchestration service
+            const tempConfig: BackupJobConfig = {
+                id: configId || `temp-${Date.now()}`,
+                name: `Manual backup to ${remoteName}`,
+                sourceType: 'raw' as any,
+                destinationType: 'rclone' as any,
+                schedule: '',
+                enabled: true,
+                sourceConfig: {
+                    type: 'raw',
+                    sourcePath: sourcePath,
+                } as any,
+                destinationConfig: {
+                    type: 'rclone',
+                    remoteName: remoteName,
+                    destinationPath: destinationPath,
+                    options: options,
+                } as any,
+                createdAt: new Date().toISOString(),
+                updatedAt: new Date().toISOString(),
+            };
+
+            const jobId = tempConfig.id;
+
+            // Use the orchestration service for execution
+            await this.backupOrchestrationService.executeBackupJob(tempConfig, jobId);
+
+            this.logger.log(`Backup job initiated successfully with ID: ${jobId}`);
+
+            return {
+                status: 'Backup initiated successfully',
+                jobId: jobId,
+            };
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            this.logger.error(
+                `Failed to execute backup: ${errorMessage}`,
+                error instanceof Error ? error.stack : undefined
+            );
+
+            return {
+                status: `Failed to initiate backup: ${errorMessage}`,
+                jobId: undefined,
+            };
+        }
+    }
+
+    @ResolveField(() => BackupJobConfig, {
+        description: 'Create a new backup job configuration',
+    })
+    @UsePermissions({
+        action: AuthActionVerb.CREATE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async createBackupJobConfig(
+        @Args('input') input: CreateBackupJobConfigInput
+    ): Promise<BackupJobConfig> {
+        return this.backupConfigService.createBackupJobConfig(input);
+    }
+
+    @ResolveField(() => BackupJobConfig, {
+        description: 'Update a backup job configuration',
+        nullable: true,
+    })
+    @UsePermissions({
+        action: AuthActionVerb.UPDATE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async updateBackupJobConfig(
+        @Args('id', { type: () => PrefixedID }) id: string,
+        @Args('input') input: UpdateBackupJobConfigInput
+    ): Promise<BackupJobConfig | null> {
+        return this.backupConfigService.updateBackupJobConfig(id, input);
+    }
+
+    @ResolveField(() => Boolean, {
+        description: 'Delete a backup job configuration',
+    })
+    @UsePermissions({
+        action: AuthActionVerb.DELETE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async deleteBackupJobConfig(@Args('id', { type: () => PrefixedID }) id: string): Promise<boolean> {
+        return this.backupConfigService.deleteBackupJobConfig(id);
+    }
+
+    @ResolveField(() => BackupStatus, {
+        description: 'Initiates a backup using a configured remote.',
+    })
+    @UsePermissions({
+        action: AuthActionVerb.CREATE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async initiateBackup(@Args('input') input: InitiateBackupInput): Promise<BackupStatus> {
+        return this.executeBackup(
+            input.sourcePath,
+            input.remoteName,
+            input.destinationPath,
+            input.options || {}
+        );
+    }
+
+    @ResolveField(() => BackupJobConfig, {
+        description: 'Toggle a backup job configuration enabled/disabled',
+        nullable: true,
+    })
+    @UsePermissions({
+        action: AuthActionVerb.UPDATE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async toggleJobConfig(
+        @Args('id', { type: () => PrefixedID }) id: string
+    ): Promise<BackupJobConfig | null> {
+        const existing = await this.backupConfigService.getBackupJobConfig(id);
+        if (!existing) return null;
+
+        return this.backupConfigService.updateBackupJobConfig(id, {
+            enabled: !existing.enabled,
+        });
+    }
+
+    @ResolveField(() => BackupStatus, {
+        description: 'Manually trigger a backup job using existing configuration',
+    })
+    @UsePermissions({
+        action: AuthActionVerb.CREATE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async triggerJob(@Args('id', { type: () => PrefixedID }) id: string): Promise<BackupStatus> {
+        const config = await this.backupConfigService.getBackupJobConfig(id);
+        if (!config) {
+            return {
+                status: 'Failed to trigger backup: Configuration not found',
+                jobId: undefined,
+            };
+        }
+
+        try {
+            // Use the orchestration service to execute the backup job
+            await this.backupOrchestrationService.executeBackupJob(config, config.id);
+
+            // Update the config with job start information
+            await this.backupConfigService.updateBackupJobConfig(id, {
+                lastRunStatus: `Started with job ID: ${config.id}`,
+                lastRunAt: new Date().toISOString(),
+            });
+
+            return {
+                status: 'Backup job triggered successfully',
+                jobId: config.id,
+            };
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            this.logger.error(`Failed to trigger backup job ${id}: ${errorMessage}`);
+
+            await this.backupConfigService.updateBackupJobConfig(id, {
+                lastRunStatus: `Failed: ${errorMessage}`,
+                lastRunAt: new Date().toISOString(),
+            });
+
+            return {
+                status: `Failed to trigger backup: ${errorMessage}`,
+                jobId: undefined,
+            };
+        }
+    }
+
+    @ResolveField(() => BackupStatus, {
+        description: 'Stop all running backup jobs',
+    })
+    @UsePermissions({
+        action: AuthActionVerb.DELETE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async stopAllBackupJobs(): Promise<BackupStatus> {
+        try {
+            const result = await this.rcloneService['rcloneApiService'].stopAllJobs();
+            const stoppedCount = result.stopped.length;
+            const errorCount = result.errors.length;
+
+            if (stoppedCount > 0) {
+                this.logger.log(`Stopped ${stoppedCount} backup jobs`);
+            }
+
+            if (errorCount > 0) {
+                this.logger.warn(`Failed operations on ${errorCount} jobs: ${result.errors.join(', ')}`);
+            }
+
+            return {
+                status: `Stopped ${stoppedCount} jobs${errorCount > 0 ? `, ${errorCount} errors` : ''}`,
+                jobId: undefined,
+            };
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            this.logger.error(`Failed to stop backup jobs: ${errorMessage}`);
+            return {
+                status: `Failed to stop backup jobs: ${errorMessage}`,
+                jobId: undefined,
+            };
+        }
+    }
+
+    @ResolveField(() => BackupStatus, {
+        description: 'Stop a specific backup job',
+    })
+    @UsePermissions({
+        action: AuthActionVerb.DELETE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async stopBackupJob(@Args('id', { type: () => PrefixedID }) id: string): Promise<BackupStatus> {
+        try {
+            const result = await this.rcloneService['rcloneApiService'].stopJob(id);
+            const stoppedCount = result.stopped.length;
+            const errorCount = result.errors.length;
+
+            if (stoppedCount > 0) {
+                this.logger.log(`Stopped backup job: ${id}`);
+            }
+
+            if (errorCount > 0) {
+                this.logger.warn(`Failed to stop job ${id}: ${result.errors.join(', ')}`);
+            }
+
+            return {
+                status: stoppedCount > 0 ? `Stopped job ${id}` : `Failed to stop job ${id}`,
+                jobId: stoppedCount > 0 ? id : undefined,
+            };
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            this.logger.error(`Failed to stop backup job ${id}: ${errorMessage}`);
+            return {
+                status: `Failed to stop backup job: ${errorMessage}`,
+                jobId: undefined,
+            };
+        }
+    }
+
+    @ResolveField(() => BackupStatus, {
+        description: 'Forget all finished backup jobs to clean up the job list',
+    })
+    @UsePermissions({
+        action: AuthActionVerb.DELETE,
+        resource: Resource.BACKUP,
+        possession: AuthPossession.ANY,
+    })
+    async forgetFinishedBackupJobs(): Promise<BackupStatus> {
+        try {
+            this.logger.log('Forgetting finished backup jobs is handled automatically by RClone');
+            return {
+                status: 'Finished jobs are automatically cleaned up by RClone',
+                jobId: undefined,
+            };
+        } catch (error) {
+            const errorMessage = error instanceof Error ?
error.message : String(error); + this.logger.error(`Failed to forget finished backup jobs: ${errorMessage}`); + return { + status: `Failed to forget finished backup jobs: ${errorMessage}`, + jobId: undefined, + }; + } + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/backup.model.ts b/api/src/unraid-api/graph/resolvers/backup/backup.model.ts new file mode 100644 index 0000000000..18d1fe8f75 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/backup.model.ts @@ -0,0 +1,226 @@ +import { Field, InputType, ObjectType } from '@nestjs/graphql'; + +import { type Layout } from '@jsonforms/core'; +import { Node } from '@unraid/shared/graphql.model.js'; +import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js'; +import { + IsBoolean, + IsNotEmpty, + IsObject, + IsOptional, + IsString, + Matches, + ValidateIf, + ValidateNested, +} from 'class-validator'; +import { DateTimeISOResolver, GraphQLJSON } from 'graphql-scalars'; + +import { + DestinationConfigInput, + DestinationConfigInputUnion, + DestinationConfigUnion, + DestinationType, +} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js'; +import { JobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js'; +import { + SourceConfigInput, + SourceConfigInputUnion, + SourceConfigUnion, + SourceType, +} from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js'; +import { DataSlice } from '@app/unraid-api/types/json-forms.js'; + +@ObjectType({ + implements: () => Node, +}) +export class Backup extends Node { + @Field(() => [JobStatus]) + jobs!: JobStatus[]; + + @Field(() => [BackupJobConfig]) + configs!: BackupJobConfig[]; +} + +@InputType() +export class InitiateBackupInput { + @Field(() => String, { description: 'The name of the remote configuration to use for the backup.' }) + @IsString() + @IsNotEmpty() + remoteName!: string; + + @Field(() => String, { description: 'Source path to backup.' }) + @IsString() + @IsNotEmpty() + sourcePath!: string; + + @Field(() => String, { description: 'Destination path on the remote.' 
})
+    @IsString()
+    @IsNotEmpty()
+    destinationPath!: string;
+
+    @Field(() => GraphQLJSON, {
+        description: 'Additional options for the backup operation, such as --dry-run or --transfers.',
+        nullable: true,
+    })
+    @IsOptional()
+    @IsObject()
+    options?: Record<string, unknown>;
+}
+
+@ObjectType()
+export class BackupStatus {
+    @Field(() => String, {
+        description: 'Status message indicating the outcome of the backup initiation.',
+    })
+    status!: string;
+
+    @Field(() => String, {
+        description: 'Job ID if available, can be used to check job status.',
+        nullable: true,
+    })
+    jobId?: string;
+}
+
+@ObjectType()
+export class RCloneWebGuiInfo {
+    @Field()
+    url!: string;
+}
+
+@ObjectType({
+    implements: () => Node,
+})
+export class BackupJobConfig extends Node {
+    @Field(() => String, { description: 'Human-readable name for this backup job' })
+    name!: string;
+
+    @Field(() => SourceType, { description: 'Type of the backup source' })
+    sourceType!: SourceType;
+
+    @Field(() => DestinationType, { description: 'Type of the backup destination' })
+    destinationType!: DestinationType;
+
+    @Field(() => String, {
+        description: 'Cron schedule expression (e.g., "0 2 * * *" for daily at 2AM)',
+    })
+    schedule!: string;
+
+    @Field(() => Boolean, { description: 'Whether this backup job is enabled' })
+    enabled!: boolean;
+
+    @Field(() => SourceConfigUnion, { description: 'Source configuration for this backup job' })
+    sourceConfig!: typeof SourceConfigUnion;
+
+    @Field(() => DestinationConfigUnion, {
+        description: 'Destination configuration for this backup job',
+    })
+    destinationConfig!: typeof DestinationConfigUnion;
+
+    @Field(() => DateTimeISOResolver, { description: 'When this config was created' })
+    createdAt!: string;
+
+    @Field(() => DateTimeISOResolver, { description: 'When this config was last updated' })
+    updatedAt!: string;
+
+    @Field(() => DateTimeISOResolver, { description: 'Last time this job ran', nullable: true })
+    lastRunAt?: string;
+
+    @Field(() => String, { description: 'Status of last run', nullable: true })
+    lastRunStatus?: string;
+
+    @Field(() => String, { description: 'Current running job ID for this config', nullable: true })
+    currentJobId?: string;
+}
+
+@InputType()
+export class BaseBackupJobConfigInput {
+    @Field(() => String, { nullable: true })
+    @IsOptional()
+    @IsString()
+    @IsNotEmpty()
+    name?: string;
+
+    @Field(() => String, { nullable: true })
+    @IsOptional()
+    @IsString()
+    @ValidateIf((o) => o.schedule && o.schedule.length > 0)
+    @Matches(
+        /^(\*|[0-5]?\d)(\s+(\*|[0-1]?\d|2[0-3]))(\s+(\*|[1-2]?\d|3[0-1]))(\s+(\*|[1-9]|1[0-2]))(\s+(\*|[0-6]))$/,
+        {
+            message: 'schedule must be a valid cron expression',
+        }
+    )
+    schedule?: string;
+
+    @Field(() => Boolean, { nullable: true })
+    @IsOptional()
+    @IsBoolean()
+    enabled?: boolean;
+
+    @Field(() => SourceConfigInputUnion, {
+        description: 'Source configuration for this backup job',
+        nullable: true,
+    })
+    @IsOptional()
+    @ValidateNested()
+    sourceConfig?: SourceConfigInput;
+
+    @Field(() => DestinationConfigInputUnion, {
+        description: 'Destination configuration for this backup job',
+        nullable: true,
+    })
+    @IsOptional()
+    @ValidateNested()
+    destinationConfig?: DestinationConfigInput;
+}
+
+@InputType()
+export class CreateBackupJobConfigInput extends BaseBackupJobConfigInput {
+    @Field(() => String)
+    @IsString()
+    @IsNotEmpty()
+    declare name: string;
+
+    @Field(() => Boolean, { defaultValue: true })
+    @IsBoolean()
+    @ValidateIf((o) => o.schedule && o.schedule.length > 0)
+    declare enabled: boolean;
+}
+
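+// Illustrative sketch: a literal that would satisfy CreateBackupJobConfigInput as defined
+// above. The rcloneConfig shape follows DestinationConfigInput in this PR; the sourceConfig
+// shape and its rawConfig key are assumptions by analogy, and 'gdrive' plus both paths are
+// hypothetical values.
+//
+// const example: CreateBackupJobConfigInput = {
+//     name: 'Nightly documents backup',
+//     schedule: '0 2 * * *', // daily at 2 AM; must pass the @Matches cron check above
+//     enabled: true,
+//     sourceConfig: { type: SourceType.RAW, rawConfig: { sourcePath: '/mnt/user/documents' } },
+//     destinationConfig: {
+//         type: DestinationType.RCLONE,
+//         rcloneConfig: { remoteName: 'gdrive', destinationPath: 'backups/documents' },
+//     },
+// };
+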
+@InputType() +export class UpdateBackupJobConfigInput extends BaseBackupJobConfigInput { + @Field(() => String, { nullable: true }) + @IsOptional() + @IsString() + lastRunStatus?: string; + + @Field(() => String, { nullable: true }) + @IsOptional() + @IsString() + lastRunAt?: string; + + @Field(() => String, { nullable: true }) + @IsOptional() + @IsString() + currentJobId?: string; +} + +@ObjectType() +export class BackupJobConfigForm { + @Field(() => PrefixedID) + id!: string; + + @Field(() => GraphQLJSON) + dataSchema!: { properties: DataSlice; type: 'object' }; + + @Field(() => GraphQLJSON) + uiSchema!: Layout; +} + +@InputType() +export class BackupJobConfigFormInput { + @Field(() => Boolean, { defaultValue: false }) + @IsOptional() + @IsBoolean() + showAdvanced?: boolean; +} diff --git a/api/src/unraid-api/graph/resolvers/backup/backup.module.ts b/api/src/unraid-api/graph/resolvers/backup/backup.module.ts new file mode 100644 index 0000000000..8fe0749dc8 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/backup.module.ts @@ -0,0 +1,30 @@ +import { forwardRef, Module } from '@nestjs/common'; +import { ScheduleModule } from '@nestjs/schedule'; + +import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js'; +import { BackupMutationsResolver } from '@app/unraid-api/graph/resolvers/backup/backup-mutations.resolver.js'; +import { + BackupJobConfigResolver, + BackupResolver, +} from '@app/unraid-api/graph/resolvers/backup/backup.resolver.js'; +import { BackupDestinationModule } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.module.js'; +import { BackupJobStatusResolver } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.resolver.js'; +import { BackupJobTrackingService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.js'; +import { BackupOrchestrationService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.js'; +import { BackupSourceModule } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.module.js'; +import { RCloneModule } from '@app/unraid-api/graph/resolvers/rclone/rclone.module.js'; + +@Module({ + imports: [RCloneModule, ScheduleModule.forRoot(), BackupSourceModule, BackupDestinationModule], + providers: [ + BackupResolver, + BackupJobConfigResolver, + BackupMutationsResolver, + BackupConfigService, + BackupOrchestrationService, + BackupJobTrackingService, + BackupJobStatusResolver, + ], + exports: [forwardRef(() => BackupOrchestrationService), BackupJobTrackingService], +}) +export class BackupModule {} diff --git a/api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts b/api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts new file mode 100644 index 0000000000..a696b02b27 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/backup.resolver.ts @@ -0,0 +1,131 @@ +import { Logger } from '@nestjs/common'; +import { Args, Parent, Query, ResolveField, Resolver } from '@nestjs/graphql'; + +import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js'; + +import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js'; +import { + Backup, + BackupJobConfig, + BackupJobConfigForm, + BackupJobConfigFormInput, + BackupStatus, +} from '@app/unraid-api/graph/resolvers/backup/backup.model.js'; +import { buildBackupJobConfigSchema } from '@app/unraid-api/graph/resolvers/backup/jsonforms/backup-jsonforms-config.js'; +import { 
JobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
+import { BackupJobTrackingService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.js';
+import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js';
+import { FormatService } from '@app/unraid-api/utils/format.service.js';
+
+@Resolver(() => Backup)
+export class BackupResolver {
+    private readonly logger = new Logger(BackupResolver.name);
+
+    constructor(
+        private readonly rcloneService: RCloneService,
+        private readonly backupConfigService: BackupConfigService,
+        private readonly formatService: FormatService,
+        private readonly backupJobTrackingService: BackupJobTrackingService
+    ) {}
+
+    @Query(() => Backup, {
+        description: 'Get backup service information',
+    })
+    async backup(): Promise<Backup> {
+        return {
+            id: 'backup',
+            jobs: [],
+            configs: [],
+        };
+    }
+
+    @ResolveField(() => [JobStatus], {
+        description: 'Get all running backup jobs',
+    })
+    async jobs(): Promise<JobStatus[]> {
+        return this.backupJobTrackingService.getAllJobStatuses();
+    }
+
+    @ResolveField(() => [BackupJobConfig], {
+        description: 'Get all backup job configurations',
+    })
+    async configs(): Promise<BackupJobConfig[]> {
+        return this.backupConfigService.getAllBackupJobConfigs();
+    }
+
+    @Query(() => BackupJobConfig, {
+        description: 'Get a specific backup job configuration',
+        nullable: true,
+    })
+    async backupJobConfig(
+        @Args('id', { type: () => PrefixedID }) id: string
+    ): Promise<BackupJobConfig | null> {
+        return this.backupConfigService.getBackupJobConfig(id);
+    }
+
+    @Query(() => JobStatus, {
+        description: 'Get status of a specific backup job',
+        nullable: true,
+    })
+    async backupJob(@Args('id', { type: () => PrefixedID }) id: string): Promise<JobStatus | null> {
+        return this.backupJobTrackingService.getJobStatus(id) || null;
+    }
+
+    @ResolveField(() => BackupStatus, {
+        description: 'Get the status for the backup service',
+    })
+    async status(): Promise<BackupStatus> {
+        return {
+            status: 'Available',
+            jobId: undefined,
+        };
+    }
+
+    @Query(() => BackupJobConfigForm, {
+        description: 'Get the JSON schema for backup job configuration form',
+    })
+    async backupJobConfigForm(
+        @Args('input', { nullable: true }) input?: BackupJobConfigFormInput
+    ): Promise<BackupJobConfigForm> {
+        const remotes = await this.rcloneService.getRemoteDetails();
+
+        const { dataSchema, uiSchema } = buildBackupJobConfigSchema({
+            remotes,
+        });
+
+        return {
+            id: 'backup-job-config-form',
+            dataSchema,
+            uiSchema,
+        };
+    }
+}
+
+@Resolver(() => BackupJobConfig)
+export class BackupJobConfigResolver {
+    private readonly logger = new Logger(BackupJobConfigResolver.name);
+
+    constructor(private readonly backupJobTrackingService: BackupJobTrackingService) {}
+
+    @ResolveField(() => JobStatus, {
+        description: 'Get the current running job for this backup config',
+        nullable: true,
+    })
+    async currentJob(@Parent() config: BackupJobConfig): Promise<JobStatus | null> {
+        if (!config.currentJobId) {
+            return null;
+        }
+
+        this.logger.debug(
+            `Looking for current job for config ${config.id} using currentJobId: ${config.currentJobId}`
+        );
+
+        const jobStatus = this.backupJobTrackingService.getJobStatus(config.currentJobId);
+        if (!jobStatus) {
+            this.logger.debug(`No job status found for job ID: ${config.currentJobId}`);
+            return null;
+        }
+
+        return jobStatus as JobStatus;
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/backup.utils.ts b/api/src/unraid-api/graph/resolvers/backup/backup.utils.ts
new file mode 100644
index 0000000000..b0f0dbc4f9
--- /dev/null
+++
b/api/src/unraid-api/graph/resolvers/backup/backup.utils.ts @@ -0,0 +1,32 @@ +export const BACKUP_JOB_GROUP_PREFIX = 'backup-'; + +/** + * Generates the group ID for a backup job based on its configuration ID. + * This group ID is used by RClone to group related backup operations. + * @param configId The ID of the backup job configuration. + * @returns The RClone group ID string. + */ +export function getBackupJobGroupId(configId: string): string { + return `${BACKUP_JOB_GROUP_PREFIX}${configId}`; +} + +/** + * Extracts the configuration ID from a backup job group ID. + * @param groupId The RClone group ID string (e.g., "backup-someConfigId"). + * @returns The configuration ID if the group ID is valid and prefixed, otherwise undefined. + */ +export function getConfigIdFromGroupId(groupId: string): string | undefined { + if (groupId.startsWith(BACKUP_JOB_GROUP_PREFIX)) { + return groupId.substring(BACKUP_JOB_GROUP_PREFIX.length); + } + return undefined; +} + +/** + * Checks if the given ID corresponds to a backup job group. + * @param id The ID string to check (can be a job ID or a group ID). + * @returns True if the ID represents a backup job group, false otherwise. + */ +export function isBackupJobGroup(id: string): boolean { + return id.startsWith(BACKUP_JOB_GROUP_PREFIX); +} diff --git a/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination-jsonforms.config.ts b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination-jsonforms.config.ts new file mode 100644 index 0000000000..4ef27dc5e1 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination-jsonforms.config.ts @@ -0,0 +1,180 @@ +import type { LabelElement, SchemaBasedCondition } from '@jsonforms/core'; +import { JsonSchema7, RuleEffect } from '@jsonforms/core'; + +import type { RCloneRemote } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; +import type { SettingSlice, UIElement } from '@app/unraid-api/types/json-forms.js'; +import { DestinationType } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js'; +import { createLabeledControl } from '@app/unraid-api/graph/utils/form-utils.js'; + +export function getDestinationConfigSlice({ remotes = [] }: { remotes?: RCloneRemote[] }): SettingSlice { + const destinationConfigElements: UIElement[] = [ + { + type: 'Control', + scope: '#/properties/destinationConfig/properties/type', + options: { + format: 'radio', + radioLayout: 'horizontal', + options: [ + { + label: 'RClone Remote', + value: DestinationType.RCLONE, + description: 'Backup to cloud storage via RClone', + }, + ], + }, + }, + + // RClone Configuration + { + type: 'VerticalLayout', + rule: { + effect: RuleEffect.SHOW, + condition: { + scope: '#/properties/destinationConfig/properties/type', + schema: { const: DestinationType.RCLONE }, + } as SchemaBasedCondition, + }, + elements: [ + { + type: 'Label', + text: 'RClone Configuration', + options: { + description: 'Configure RClone remote destination settings.', + }, + } as LabelElement, + + createLabeledControl({ + scope: '#/properties/destinationConfig/properties/rcloneConfig/properties/remoteName', + label: 'Remote Configuration', + description: 'Select the RClone remote configuration to use for this backup', + controlOptions: { + suggestions: remotes.map((remote) => ({ + value: remote.name, + label: `${remote.name} (${remote.type})`, + })), + }, + }), + + createLabeledControl({ + scope: 
'#/properties/destinationConfig/properties/rcloneConfig/properties/destinationPath', + label: 'Destination Path', + description: + 'The path on the remote where files will be stored (e.g., backups/documents)', + controlOptions: { + placeholder: 'backups/', + format: 'string', + }, + }), + + createLabeledControl({ + scope: '#/properties/destinationConfig/properties/rcloneConfig/properties/rcloneOptions/properties/transfers', + label: 'Number of Transfers', + description: 'Number of file transfers to run in parallel (default: 4)', + controlOptions: { + placeholder: '4', + format: 'number', + }, + }), + + createLabeledControl({ + scope: '#/properties/destinationConfig/properties/rcloneConfig/properties/rcloneOptions/properties/checkers', + label: 'Number of Checkers', + description: 'Number of checkers to run in parallel (default: 8)', + controlOptions: { + placeholder: '8', + format: 'number', + }, + }), + ], + }, + ]; + + const destinationConfigProperties: Record = { + destinationConfig: { + type: 'object', + title: 'Destination Configuration', + description: 'Configuration for backup destination', + properties: { + type: { + type: 'string', + title: 'Destination Type', + description: 'Type of destination to use for backup', + enum: [DestinationType.RCLONE], + default: DestinationType.RCLONE, + }, + rcloneConfig: { + type: 'object', + title: 'RClone Configuration', + properties: { + remoteName: { + type: 'string', + title: 'Remote Name', + description: 'Remote name from rclone config', + enum: + remotes.length > 0 + ? remotes.map((remote) => remote.name) + : ['No remotes configured'], + }, + destinationPath: { + type: 'string', + title: 'Destination Path', + description: 'Destination path on the remote', + minLength: 1, + }, + rcloneOptions: { + type: 'object', + title: 'RClone Options', + description: 'Advanced RClone configuration options', + properties: { + transfers: { + type: 'integer', + title: 'Transfers', + description: 'Number of file transfers to run in parallel', + minimum: 1, + maximum: 100, + default: 4, + }, + checkers: { + type: 'integer', + title: 'Checkers', + description: 'Number of checkers to run in parallel', + minimum: 1, + maximum: 100, + default: 8, + }, + }, + }, + }, + required: ['remoteName', 'destinationPath'], + }, + }, + required: ['type'], + }, + }; + + // Apply conditional logic for destinationConfig + if ( + destinationConfigProperties.destinationConfig && + typeof destinationConfigProperties.destinationConfig === 'object' + ) { + destinationConfigProperties.destinationConfig.allOf = [ + { + if: { properties: { type: { const: DestinationType.RCLONE } }, required: ['type'] }, + then: { + required: ['rcloneConfig'], + }, + }, + ]; + } + + const verticalLayoutElement: UIElement = { + type: 'VerticalLayout', + elements: destinationConfigElements, + options: { step: 2 }, + }; + + return { + properties: destinationConfigProperties, + elements: [verticalLayoutElement], + }; +} diff --git a/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.ts b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.ts new file mode 100644 index 0000000000..f861e5a0ac --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.ts @@ -0,0 +1,59 @@ +import { Writable } from 'stream'; + +import { DestinationType } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js'; + +export interface BackupDestinationConfig { + timeout: 
number;
+    cleanupOnFailure: boolean;
+    useStreaming?: boolean;
+    supportsStreaming?: boolean;
+}
+
+export interface BackupDestinationResult {
+    success: boolean;
+    destinationPath?: string;
+    uploadedBytes?: number;
+    error?: string;
+    cleanupRequired?: boolean;
+    metadata?: Record<string, unknown>;
+}
+
+export interface StreamingDestinationHandle {
+    stream: Writable;
+    completionPromise: Promise<BackupDestinationResult>;
+}
+
+export interface BackupDestinationProcessorOptions {
+    jobId?: string;
+    onProgress?: (progress: number) => void;
+    onOutput?: (data: string) => void;
+    onError?: (error: string) => void;
+}
+
+export abstract class BackupDestinationProcessor<
+    TConfig extends BackupDestinationConfig = BackupDestinationConfig,
+> {
+    abstract readonly destinationType: DestinationType;
+
+    abstract execute(
+        sourcePath: string,
+        config: TConfig,
+        options?: BackupDestinationProcessorOptions
+    ): Promise<BackupDestinationResult>;
+
+    abstract validate(config: TConfig): Promise<{ valid: boolean; error?: string; warnings?: string[] }>;
+
+    abstract cleanup(result: BackupDestinationResult): Promise<void>;
+
+    // Getter to check if processor supports streaming
+    abstract get supportsStreaming(): boolean;
+
+    // Optional getter to get a writable stream for streaming backups
+    get getWritableStream():
+        | ((
+              config: TConfig,
+              jobId: string,
+              options?: BackupDestinationProcessorOptions
+          ) => Promise<StreamingDestinationHandle>)
+        | undefined {
+        return undefined;
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.module.ts b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.module.ts
new file mode 100644
index 0000000000..2f486f76a1
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.module.ts
@@ -0,0 +1,13 @@
+import { forwardRef, Module } from '@nestjs/common';
+
+import { BackupDestinationService } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.service.js';
+import { RCloneDestinationProcessor } from '@app/unraid-api/graph/resolvers/backup/destination/rclone/rclone-destination-processor.service.js';
+import { RCloneApiService } from '@app/unraid-api/graph/resolvers/rclone/rclone-api.service.js';
+import { RCloneModule } from '@app/unraid-api/graph/resolvers/rclone/rclone.module.js';
+
+@Module({
+    imports: [forwardRef(() => RCloneModule)],
+    providers: [RCloneApiService, BackupDestinationService, RCloneDestinationProcessor],
+    exports: [BackupDestinationService, RCloneDestinationProcessor],
+})
+export class BackupDestinationModule {}
diff --git a/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.service.ts b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.service.ts
new file mode 100644
index 0000000000..e49655ac5d
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.service.ts
@@ -0,0 +1,85 @@
+import { BadRequestException, Injectable, Logger } from '@nestjs/common';
+import { EventEmitter } from 'events';
+
+import { v4 as uuidv4 } from 'uuid';
+
+import {
+    BackupDestinationConfig,
+    BackupDestinationProcessor,
+    BackupDestinationProcessorOptions,
+    BackupDestinationResult,
+} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.js';
+import { DestinationType } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';
+import { RCloneDestinationProcessor } from '@app/unraid-api/graph/resolvers/backup/destination/rclone/rclone-destination-processor.service.js';
+
+export interface BackupDestinationOptions {
+    jobId?: string;
+    onProgress?: (progress: number) => void;
+    onOutput?: (data: string) => void;
+    onError?: (error: string) => void;
+}
+
+@Injectable()
+export class BackupDestinationService extends EventEmitter {
+    private readonly logger = new Logger(BackupDestinationService.name);
+
+    constructor(private readonly rcloneDestinationProcessor: RCloneDestinationProcessor) {
+        super();
+    }
+
+    async processDestination<T extends BackupDestinationConfig & { type: DestinationType }>(
+        sourcePath: string,
+        config: T,
+        options?: BackupDestinationOptions
+    ): Promise<BackupDestinationResult> {
+        const processor = this.getProcessor(config.type);
+        if (!processor) {
+            throw new BadRequestException(`Unsupported destination type: ${config.type}`);
+        }
+
+        const processorOptions: BackupDestinationProcessorOptions = {
+            jobId: options?.jobId || uuidv4(),
+            onProgress: options?.onProgress,
+            onOutput: options?.onOutput,
+            onError: options?.onError,
+        };
+
+        try {
+            const result = await processor.execute(sourcePath, config, processorOptions);
+            this.logger.log(`Destination processing completed for type: ${config.type}`);
+            return result;
+        } catch (error) {
+            this.logger.error(`Destination processing failed for type: ${config.type}`, error);
+            throw error;
+        }
+    }
+
+    async cancelDestinationJob(jobId: string): Promise<boolean> {
+        this.logger.log(`Attempting to cancel destination job: ${jobId}`);
+
+        try {
+            const result = await this.rcloneDestinationProcessor.execute('', {} as any, { jobId });
+            if (result.metadata?.jobId) {
+                this.logger.log(`Cancelled destination job: ${jobId}`);
+                return true;
+            }
+        } catch (error) {
+            this.logger.warn(`Failed to cancel destination job ${jobId}:`, error);
+        }
+
+        return false;
+    }
+
+    async cleanup(): Promise<void> {
+        this.logger.log('Cleaning up destination service...');
+    }
+
+    public getProcessor(type: DestinationType): BackupDestinationProcessor | null {
+        switch (type) {
+            case DestinationType.RCLONE:
+                return this.rcloneDestinationProcessor;
+            default:
+                return null;
+        }
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.types.ts b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.types.ts
new file mode 100644
index 0000000000..9c682563af
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/destination/backup-destination.types.ts
@@ -0,0 +1,95 @@
+import { createUnionType, Field, InputType, ObjectType, registerEnumType } from '@nestjs/graphql';
+
+import { Type } from 'class-transformer';
+import { IsEnum, IsNotEmpty, IsObject, IsOptional, IsString, ValidateNested } from 'class-validator';
+import { GraphQLJSON } from 'graphql-scalars';
+
+import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
+
+export enum DestinationType {
+    RCLONE = 'RCLONE',
+}
+
+registerEnumType(DestinationType, {
+    name: 'DestinationType',
+});
+
+export interface StreamingJobInfo {
+    jobId: string;
+    status: BackupJobStatus;
+    progress?: number;
+    startTime: Date;
+    endTime?: Date;
+    error?: string;
+}
+
+@ObjectType()
+export class RcloneDestinationConfig {
+    @Field(() => String)
+    type!: 'RCLONE';
+
+    @Field(() => String, { description: 'Remote name from rclone config' })
+    remoteName!: string;
+
+    @Field(() => String, { description: 'Destination path on the remote' })
+    destinationPath!: string;
+
+    @Field(() => GraphQLJSON, {
+        description: 'RClone options (e.g., --transfers, --checkers)',
+        nullable: true,
+    })
+    rcloneOptions?: Record<string, unknown>;
+
+    static isTypeOf(obj: any): obj is RcloneDestinationConfig {
+        return (
+            obj &&
+            obj.type === 'RCLONE' &&
+            typeof obj.remoteName === 'string' &&
+            typeof obj.destinationPath === 'string'
+        );
+    }
+}
+
+@InputType()
+export class RcloneDestinationConfigInput {
+    @Field(() => String)
+    @IsString()
+    @IsNotEmpty()
+    remoteName!: string;
+
+    @Field(() => String)
+    @IsString()
+    @IsNotEmpty()
+    destinationPath!: string;
+
+    @Field(() => GraphQLJSON, { nullable: true })
+    @IsOptional()
+    @IsObject()
+    rcloneOptions?: Record<string, unknown>;
+}
+
+@InputType()
+export class DestinationConfigInput {
+    @Field(() => DestinationType, { nullable: false })
+    @IsEnum(DestinationType, { message: 'Invalid destination type' })
+    type!: DestinationType;
+
+    @Field(() => RcloneDestinationConfigInput, { nullable: true })
+    @IsOptional()
+    @ValidateNested()
+    @Type(() => RcloneDestinationConfigInput)
+    rcloneConfig?: RcloneDestinationConfigInput;
+}
+
+export const DestinationConfigUnion = createUnionType({
+    name: 'DestinationConfigUnion',
+    types: () => [RcloneDestinationConfig] as const,
+    resolveType(obj: any) {
+        if (RcloneDestinationConfig.isTypeOf && RcloneDestinationConfig.isTypeOf(obj)) {
+            return RcloneDestinationConfig;
+        }
+        return null;
+    },
+});
+
+export const DestinationConfigInputUnion = DestinationConfigInput;
diff --git a/api/src/unraid-api/graph/resolvers/backup/destination/rclone/rclone-destination-processor.service.ts b/api/src/unraid-api/graph/resolvers/backup/destination/rclone/rclone-destination-processor.service.ts
new file mode 100644
index 0000000000..3ea07ee993
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/destination/rclone/rclone-destination-processor.service.ts
@@ -0,0 +1,357 @@
+import { Injectable, Logger } from '@nestjs/common';
+
+import { execa } from 'execa';
+
+import { getBackupJobGroupId } from '@app/unraid-api/graph/resolvers/backup/backup.utils.js';
+import {
+    BackupDestinationConfig,
+    BackupDestinationProcessor,
+    BackupDestinationProcessorOptions,
+    BackupDestinationResult,
+    StreamingDestinationHandle,
+} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.js';
+import { DestinationType } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.types.js';
+import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
+import { RCloneApiService } from '@app/unraid-api/graph/resolvers/rclone/rclone-api.service.js';
+
+export interface RCloneDestinationConfig extends BackupDestinationConfig {
+    remoteName: string;
+    destinationPath: string;
+    transferOptions?: Record<string, unknown>;
+    useStreaming?: boolean;
+    sourceCommand?: string;
+    sourceArgs?: string[];
+    sourceType?: SourceType;
+}
+
+@Injectable()
+export class RCloneDestinationProcessor extends BackupDestinationProcessor<RCloneDestinationConfig> {
+    readonly destinationType = DestinationType.RCLONE;
+    private readonly logger = new Logger(RCloneDestinationProcessor.name);
+
+    constructor(private readonly rcloneApiService: RCloneApiService) {
+        super();
+    }
+
+    async execute(
+        sourcePath: string,
+        config: RCloneDestinationConfig,
+        options: BackupDestinationProcessorOptions = {}
+    ): Promise<BackupDestinationResult> {
+        const { jobId = 'unknown', onProgress, onOutput, onError } = options;
+
+        try {
+            this.logger.log(
+                `Starting RClone upload job ${jobId} from ${sourcePath} to ${config.remoteName}:${config.destinationPath}`
+            );
+
+            return await this.executeRegularBackup(sourcePath, config, options);
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : 'Unknown RClone error';
+            this.logger.error(`RClone upload job ${jobId} failed: ${errorMessage}`, error);
+
+            if (onError) {
+                onError(errorMessage);
+            }
+
+            return {
+                success: false,
+                error: errorMessage,
+                cleanupRequired: config.cleanupOnFailure,
+            };
+        }
+    }
+
+    private async executeRegularBackup(
+        sourcePath: string,
+        config: RCloneDestinationConfig,
+        options: BackupDestinationProcessorOptions
+    ): Promise<BackupDestinationResult> {
+        const { jobId: backupConfigId, onOutput, onProgress, onError } = options;
+
+        if (!backupConfigId) {
+            const errorMsg = 'Backup Configuration ID (jobId) is required to start RClone backup.';
+            this.logger.error(errorMsg);
+            if (onError) {
+                onError(errorMsg);
+            }
+            return {
+                success: false,
+                error: errorMsg,
+                cleanupRequired: config.cleanupOnFailure,
+            };
+        }
+
+        await this.rcloneApiService.startBackup({
+            srcPath: sourcePath,
+            dstPath: `${config.remoteName}:${config.destinationPath}`,
+            async: true,
+            configId: backupConfigId,
+            options: config.transferOptions,
+        });
+
+        const groupIdToMonitor = getBackupJobGroupId(backupConfigId);
+
+        if (onOutput) {
+            onOutput(
+                `RClone backup process initiated for group: ${groupIdToMonitor}. Monitoring progress...`
+            );
+        }
+
+        let jobStatus = await this.rcloneApiService.getEnhancedJobStatus(
+            groupIdToMonitor,
+            backupConfigId
+        );
+        this.logger.debug('Rclone Job Status: %o', jobStatus);
+        let retries = 0;
+        const effectiveTimeout = config.timeout && config.timeout >= 60000 ? config.timeout : 3600000;
+        const maxRetries = Math.floor(effectiveTimeout / 5000);
+
+        while (jobStatus && !jobStatus.finished && retries < maxRetries) {
+            await new Promise((resolve) => setTimeout(resolve, 5000));
+
+            try {
+                jobStatus = await this.rcloneApiService.getEnhancedJobStatus(
+                    groupIdToMonitor,
+                    backupConfigId
+                );
+                if (jobStatus && onProgress && jobStatus.progressPercentage !== undefined) {
+                    onProgress(jobStatus.progressPercentage);
+                }
+                if (jobStatus && onOutput && jobStatus.stats?.speed) {
+                    onOutput(`Group ${groupIdToMonitor} - Transfer speed: ${jobStatus.stats.speed} B/s`);
+                }
+            } catch (pollError: any) {
+                this.logger.warn(
+                    `[${backupConfigId}] Error polling group status for ${groupIdToMonitor}: ${(pollError as Error).message}`
+                );
+            }
+            retries++;
+        }
+
+        if (!jobStatus) {
+            const errorMsg = `Failed to get final job status for RClone group ${groupIdToMonitor}`;
+            this.logger.error(`[${backupConfigId}] ${errorMsg}`);
+            if (onError) {
+                onError(errorMsg);
+            }
+            return {
+                success: false,
+                error: errorMsg,
+                destinationPath: `${config.remoteName}:${config.destinationPath}`,
+                cleanupRequired: config.cleanupOnFailure,
+            };
+        }
+
+        if (jobStatus.finished && jobStatus.success) {
+            if (onProgress) {
+                onProgress(100);
+            }
+            if (onOutput) {
+                onOutput(`RClone backup for group ${groupIdToMonitor} completed successfully.`);
+            }
+            return {
+                success: true,
+                destinationPath: `${config.remoteName}:${config.destinationPath}`,
+                metadata: {
+                    groupId: groupIdToMonitor,
+                    remoteName: config.remoteName,
+                    remotePath: config.destinationPath,
+                    transferOptions: config.transferOptions,
+                    stats: jobStatus.stats,
+                },
+            };
+        } else {
+            let errorMsg: string;
+            if (!jobStatus.finished && retries >= maxRetries) {
+                errorMsg = `RClone group ${groupIdToMonitor} timed out after ${effectiveTimeout / 1000} seconds.`;
+                this.logger.error(`[${backupConfigId}] ${errorMsg}`);
+            } else {
+                errorMsg = jobStatus.error || `RClone group ${groupIdToMonitor} failed.`;
+                this.logger.error(`[${backupConfigId}] ${errorMsg}`, jobStatus.stats?.lastError);
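+                // Worked example of the bounds above: with the default effectiveTimeout of
+                // 3,600,000 ms (1 hour) and the fixed 5,000 ms poll interval, maxRetries is
+                // 3,600,000 / 5,000 = 720 status polls before the group is declared timed out.
+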
} + + if (onError) { + onError(errorMsg); + } + return { + success: false, + error: errorMsg, + destinationPath: `${config.remoteName}:${config.destinationPath}`, + metadata: { + groupId: groupIdToMonitor, + remoteName: config.remoteName, + remotePath: config.destinationPath, + transferOptions: config.transferOptions, + stats: jobStatus.stats, + }, + cleanupRequired: config.cleanupOnFailure, + }; + } + } + + async validate( + config: RCloneDestinationConfig + ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> { + const warnings: string[] = []; + + if (!config.remoteName) { + return { valid: false, error: 'Remote name is required' }; + } + + if (!config.destinationPath) { + return { valid: false, error: 'Remote path is required' }; + } + + if (config.useStreaming) { + if (!config.sourceCommand) { + return { valid: false, error: 'Source command is required for streaming backups' }; + } + if (!config.sourceArgs || config.sourceArgs.length === 0) { + return { valid: false, error: 'Source arguments are required for streaming backups' }; + } + } + + try { + const remotes = await this.rcloneApiService.listRemotes(); + if (!remotes.includes(config.remoteName)) { + return { valid: false, error: `Remote '${config.remoteName}' not found` }; + } + } catch (error) { + return { valid: false, error: 'Failed to validate remote configuration' }; + } + + if (config.timeout < 60000) { + warnings.push('Timeout is less than 1 minute, which may be too short for large uploads'); + } + + return { valid: true, warnings }; + } + + async cleanup(result: BackupDestinationResult): Promise { + if (!result.cleanupRequired || !result.destinationPath) { + return; + } + + const idToStop = result.metadata?.groupId || result.metadata?.jobId; + + try { + this.logger.log(`Cleaning up failed upload at ${result.destinationPath}`); + + if (idToStop) { + await this.rcloneApiService.stopJob(idToStop as string); + if (result.metadata?.groupId) { + this.logger.log(`Stopped RClone group: ${result.metadata.groupId}`); + } else if (result.metadata?.jobId) { + this.logger.log( + `Attempted to stop RClone job: ${result.metadata.jobId} (Note: Group ID preferred for cleanup)` + ); + } + } + } catch (error) { + this.logger.warn( + `Failed to cleanup destination: ${error instanceof Error ? 
error.message : 'Unknown error'}` + ); + } + } + + get supportsStreaming(): boolean { + return true; + } + + get getWritableStream(): ( + config: RCloneDestinationConfig, + jobId: string, + options?: BackupDestinationProcessorOptions + ) => Promise { + return async ( + config: RCloneDestinationConfig, + jobId: string, + options: BackupDestinationProcessorOptions = {} + ): Promise => { + const validation = await this.validate(config); + if (!validation.valid) { + const errorMsg = `RClone destination configuration validation failed: ${validation.error}`; + this.logger.error(`[${jobId}] ${errorMsg}`); + throw new Error(errorMsg); + } + + const rcloneDest = `${config.remoteName}:${config.destinationPath}`; + const rcloneArgs = ['rcat', rcloneDest, '--progress']; + + this.logger.log( + `[${jobId}] Preparing writable stream for rclone rcat to ${rcloneDest} with progress` + ); + + try { + const rcloneProcess = execa('rclone', rcloneArgs, {}); + + const completionPromise = new Promise((resolve, reject) => { + let stderrOutput = ''; + let stdoutOutput = ''; + + rcloneProcess.stderr?.on('data', (data) => { + const chunk = data.toString(); + stderrOutput += chunk; + this.logger.verbose(`[${jobId}] rclone rcat stderr: ${chunk.trim()}`); + + const progressMatch = chunk.match(/(\d+)%/); + if (progressMatch && progressMatch[1] && options.onProgress) { + const percentage = parseInt(progressMatch[1], 10); + if (!isNaN(percentage)) { + options.onProgress(percentage); + } + } + }); + + rcloneProcess.stdout?.on('data', (data) => { + const chunk = data.toString(); + stdoutOutput += chunk; + this.logger.verbose(`[${jobId}] rclone rcat stdout: ${chunk.trim()}`); + }); + + rcloneProcess + .then((result) => { + this.logger.log( + `[${jobId}] rclone rcat to ${rcloneDest} completed successfully.` + ); + resolve({ + success: true, + destinationPath: rcloneDest, + metadata: { stdout: stdoutOutput, stderr: stderrOutput }, + }); + }) + .catch((error) => { + const errorMessage = + error.stderr || error.message || 'rclone rcat command failed'; + this.logger.error( + `[${jobId}] rclone rcat to ${rcloneDest} failed: ${errorMessage}`, + error.stack + ); + reject({ + success: false, + error: errorMessage, + destinationPath: rcloneDest, + metadata: { stdout: stdoutOutput, stderr: stderrOutput }, + }); + }); + }); + + if (!rcloneProcess.stdin) { + const errMsg = 'Failed to get stdin stream from rclone process.'; + this.logger.error(`[${jobId}] ${errMsg}`); + throw new Error(errMsg); + } + + return { + stream: rcloneProcess.stdin, + completionPromise, + }; + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + this.logger.error(`[${jobId}] Failed to start rclone rcat process: ${errorMessage}`); + throw new Error(`Failed to start rclone rcat process: ${errorMessage}`); + } + }; + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/jsonforms/backup-jsonforms-config.ts b/api/src/unraid-api/graph/resolvers/backup/jsonforms/backup-jsonforms-config.ts new file mode 100644 index 0000000000..02dbcac7f3 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/jsonforms/backup-jsonforms-config.ts @@ -0,0 +1,189 @@ +import type { LabelElement, Layout, SchemaBasedCondition } from '@jsonforms/core'; +import { JsonSchema7, RuleEffect } from '@jsonforms/core'; + +import type { RCloneRemote } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; +import type { DataSlice, SettingSlice, UIElement } from '@app/unraid-api/types/json-forms.js'; +import { getDestinationConfigSlice } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination-jsonforms.config.js'; +import { getSourceConfigSlice } from '@app/unraid-api/graph/resolvers/backup/source/backup-source-jsonforms.config.js'; +import { createLabeledControl } from '@app/unraid-api/graph/utils/form-utils.js'; +import { mergeSettingSlices } from '@app/unraid-api/types/json-forms.js'; + +function getBasicBackupConfigSlice(): SettingSlice { + const basicConfigElements: UIElement[] = [ + createLabeledControl({ + scope: '#/properties/name', + label: 'Backup Job Name', + description: 'A descriptive name for this backup job (e.g., "Weekly Documents Backup")', + controlOptions: { + placeholder: 'Enter backup job name', + format: 'string', + }, + }), + + createLabeledControl({ + scope: '#/properties/schedule', + label: 'Schedule (Cron Expression)', + description: + 'When to run this backup job. Leave empty for manual execution only. 
Examples: "0 2 * * *" (daily at 2AM), "0 2 * * 0" (weekly on Sunday at 2AM)', + controlOptions: { + placeholder: 'Leave empty for manual backup', + format: 'string', + suggestions: [ + { + value: '', + label: 'Manual Only', + tooltip: 'No automatic schedule - run manually only', + }, + { + value: '0 2 * * *', + label: 'Daily at 2:00 AM', + tooltip: 'Runs every day at 2:00 AM', + }, + { + value: '0 2 * * 0', + label: 'Weekly (Sunday 2:00 AM)', + tooltip: 'Runs every Sunday at 2:00 AM', + }, + { + value: '0 9 * * 1', + label: 'Mondays at 9:00 AM', + tooltip: 'Runs every Monday at 9:00 AM', + }, + { + value: '0 0 1 * *', + label: 'Monthly (1st day at midnight)', + tooltip: 'Runs on the 1st day of every month at midnight', + }, + { + value: '0 2 1 * *', + label: 'Monthly (1st at 2:00 AM)', + tooltip: 'Runs on the 1st of every month at 2:00 AM', + }, + { + value: '0 2 * * 1-5', + label: 'Weekdays at 2:00 AM', + tooltip: 'Runs Monday through Friday at 2:00 AM', + }, + ], + }, + }), + + createLabeledControl({ + scope: '#/properties/enabled', + label: 'Enable Backup Job', + description: 'Whether this backup job should run automatically according to the schedule', + controlOptions: { + toggle: true, + }, + rule: { + effect: RuleEffect.SHOW, + condition: { + scope: '#/properties/schedule', + schema: { + type: 'string', + minLength: 1, + }, + } as SchemaBasedCondition, + }, + }), + ]; + + const basicConfigProperties: Record = { + name: { + type: 'string', + title: 'Backup Job Name', + description: 'Human-readable name for this backup job', + minLength: 1, + maxLength: 100, + }, + schedule: { + type: 'string', + title: 'Cron Schedule', + description: 'Cron schedule expression (empty for manual execution)', + }, + enabled: { + type: 'boolean', + title: 'Enabled', + description: 'Whether this backup job is enabled', + default: true, + }, + configStep: { + type: 'object', + properties: { + current: { type: 'integer', default: 0 }, + total: { type: 'integer', default: 3 }, + }, + default: { current: 0, total: 3 }, + }, + }; + + const verticalLayoutElement: UIElement = { + type: 'VerticalLayout', + elements: basicConfigElements, + options: { step: 0 }, + }; + + return { + properties: basicConfigProperties as unknown as DataSlice, + elements: [verticalLayoutElement], + }; +} + +export function buildBackupJobConfigSchema({ remotes = [] }: { remotes?: RCloneRemote[] }): { + dataSchema: { properties: DataSlice; type: 'object' }; + uiSchema: Layout; +} { + const slicesToMerge: SettingSlice[] = []; + + const basicSlice = getBasicBackupConfigSlice(); + slicesToMerge.push(basicSlice); + + const sourceSlice = getSourceConfigSlice(); + slicesToMerge.push(sourceSlice); + + const destinationSlice = getDestinationConfigSlice({ remotes }); + slicesToMerge.push(destinationSlice); + + const mergedSlices = mergeSettingSlices(slicesToMerge); + + const dataSchema: { properties: DataSlice; type: 'object' } = { + type: 'object', + properties: mergedSlices.properties, + }; + + const steps = [ + { label: 'Backup Configuration', description: 'Basic backup job settings' }, + { label: 'Source Configuration', description: 'Configure what to backup' }, + { label: 'Destination Configuration', description: 'Configure where to backup' }, + ]; + + const step0Elements = basicSlice.elements; + const step1Elements = sourceSlice.elements; + const step2Elements = destinationSlice.elements; + + const steppedLayoutElement: UIElement = { + type: 'SteppedLayout', + options: { + steps: steps, + }, + elements: [...(step0Elements || []), 
...(step1Elements || []), ...(step2Elements || [])].filter( + (el) => el + ) as UIElement[], + }; + + const titleLabel: UIElement = { + type: 'Label', + text: 'Create Backup Job', + options: { + format: 'title', + description: 'Configure a new scheduled backup job with RClone.', + }, + }; + + const uiSchema: Layout = { + type: 'VerticalLayout', + elements: [titleLabel, steppedLayoutElement], + }; + + return { dataSchema, uiSchema }; +} diff --git a/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.ts b/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.ts new file mode 100644 index 0000000000..f1af504572 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.ts @@ -0,0 +1,76 @@ +import { Field, GraphQLISODateTime, Int, ObjectType, registerEnumType } from '@nestjs/graphql'; + +import { Node } from '@unraid/shared/graphql.model'; + +// Moved BackupJobStatus enum here +export enum BackupJobStatus { + QUEUED = 'Queued', + RUNNING = 'Running', + COMPLETED = 'Completed', + FAILED = 'Failed', + CANCELLED = 'Cancelled', +} + +registerEnumType(BackupJobStatus, { + name: 'BackupJobStatus', + description: 'Status of a backup job', +}); + +@ObjectType({ + implements: () => Node, +}) +export class JobStatus extends Node { + @Field(() => String, { description: 'External job ID from the job execution system' }) + externalJobId!: string; + + @Field() + name!: string; + + @Field(() => BackupJobStatus) + status!: BackupJobStatus; + + @Field(() => Int, { description: 'Progress percentage (0-100)' }) + progress!: number; + + @Field({ nullable: true }) + message?: string; + + @Field({ nullable: true }) + error?: string; + + @Field(() => GraphQLISODateTime) + startTime!: Date; + + @Field(() => GraphQLISODateTime, { nullable: true }) + endTime?: Date; + + @Field(() => Int, { nullable: true, description: 'Bytes transferred' }) + bytesTransferred?: number; + + @Field(() => Int, { nullable: true, description: 'Total bytes to transfer' }) + totalBytes?: number; + + @Field(() => Int, { nullable: true, description: 'Transfer speed in bytes per second' }) + speed?: number; + + @Field(() => Int, { nullable: true, description: 'Elapsed time in seconds' }) + elapsedTime?: number; + + @Field(() => Int, { nullable: true, description: 'Estimated time to completion in seconds' }) + eta?: number; + + @Field(() => String, { nullable: true, description: 'Human-readable bytes transferred' }) + formattedBytesTransferred?: string; + + @Field(() => String, { nullable: true, description: 'Human-readable transfer speed' }) + formattedSpeed?: string; + + @Field(() => String, { nullable: true, description: 'Human-readable elapsed time' }) + formattedElapsedTime?: string; + + @Field(() => String, { nullable: true, description: 'Human-readable ETA' }) + formattedEta?: string; +} + +// Use JobStatus as the unified type for both GraphQL and TypeScript +export type JobStatusInfo = JobStatus; diff --git a/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.resolver.ts b/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.resolver.ts new file mode 100644 index 0000000000..7e93b2a6f8 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.resolver.ts @@ -0,0 +1,30 @@ +import { Args, Query, Resolver } from '@nestjs/graphql'; + +import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js'; + +import { JobStatus } from 
'@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
+import { BackupJobTrackingService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.js';
+
+@Resolver(() => JobStatus)
+export class BackupJobStatusResolver {
+    constructor(private readonly jobTrackingService: BackupJobTrackingService) {}
+
+    @Query(() => JobStatus, { name: 'backupJobStatus', nullable: true })
+    async getJobStatus(
+        @Args('jobId', { type: () => PrefixedID }) jobId: string
+    ): Promise<JobStatus | null> {
+        const jobStatus = this.jobTrackingService.getJobStatus(jobId);
+        if (!jobStatus) {
+            // Optionally throw NotFoundException or return null based on desired API behavior
+            // throw new NotFoundException(`Job with ID ${jobId} not found.`);
+            return null;
+        }
+        return jobStatus as JobStatus; // Map JobStatusInfo to JobStatusGQL if necessary
+    }
+
+    @Query(() => [JobStatus], { name: 'allBackupJobStatuses' })
+    async getAllJobStatuses(): Promise<JobStatus[]> {
+        const statuses = this.jobTrackingService.getAllJobStatuses();
+        return statuses as JobStatus[]; // Map JobStatusInfo[] to JobStatusGQL[] if necessary
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.ts b/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.ts
new file mode 100644
index 0000000000..eb043e1d81
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.ts
@@ -0,0 +1,119 @@
+import { Injectable, Logger } from '@nestjs/common';
+
+import {
+    BackupJobStatus,
+    JobStatus,
+} from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
+
+@Injectable()
+export class BackupJobTrackingService {
+    private readonly logger = new Logger(BackupJobTrackingService.name);
+    private activeJobs: Map<string, JobStatus> = new Map(); // Maps internal ID -> JobStatus
+    private externalJobIndex: Map<string, string> = new Map(); // Maps external ID -> internal ID
+
+    constructor() {
+        // Potentially load persisted jobs if needed
+    }
+
+    initializeJob(externalJobId: string, jobName: string): JobStatus {
+        // Check if external job already exists
+        const existingInternalId = this.externalJobIndex.get(externalJobId);
+        if (existingInternalId && this.activeJobs.has(existingInternalId)) {
+            this.logger.warn(`Job with external ID ${externalJobId} is already initialized.`);
+            return this.activeJobs.get(existingInternalId)!;
+        }
+
+        const internalId = `job_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
+        const newJobStatus: JobStatus = {
+            id: internalId,
+            externalJobId,
+            name: jobName,
+            status: BackupJobStatus.QUEUED,
+            progress: 0,
+            startTime: new Date(),
+            message: 'Job initialized.',
+        };
+
+        this.activeJobs.set(internalId, newJobStatus);
+        this.externalJobIndex.set(externalJobId, internalId);
+        this.logger.log(
+            `Job initialized: ${jobName} (Internal ID: ${internalId}, External ID: ${externalJobId})`
+        );
+        return newJobStatus;
+    }
+
+    updateJobStatus(
+        internalId: string,
+        updates: Partial<Omit<JobStatus, 'id'>>
+    ): JobStatus | null {
+        const job = this.activeJobs.get(internalId);
+        if (!job) {
+            this.logger.warn(`Cannot update status for unknown internal job ID: ${internalId}`);
+            return null;
+        }
+
+        const updatedJob = { ...job, ...updates };
+
+        if (
+            updates.status === BackupJobStatus.COMPLETED ||
+            updates.status === BackupJobStatus.FAILED ||
+            updates.status === BackupJobStatus.CANCELLED
+        ) {
+            updatedJob.endTime = new Date();
+            updatedJob.progress = updates.status === BackupJobStatus.COMPLETED ? 100 : job.progress;
+        }
+
+        if (updatedJob.progress > 100) {
+            updatedJob.progress = 100;
+        }
+
+        this.activeJobs.set(internalId, updatedJob);
+        this.logger.log(
+            `Job status updated for ${job.name} (Internal ID: ${internalId}): Status: ${updatedJob.status}, Progress: ${updatedJob.progress}%`
+        );
+        return updatedJob;
+    }
+
+    // For external systems (like RClone) to update job status
+    updateJobStatusByExternalId(
+        externalJobId: string,
+        updates: Partial<Omit<JobStatus, 'id'>>
+    ): JobStatus | null {
+        const internalId = this.externalJobIndex.get(externalJobId);
+        if (!internalId) {
+            this.logger.warn(`Cannot find internal job for external ID: ${externalJobId}`);
+            return null;
+        }
+        return this.updateJobStatus(internalId, updates);
+    }
+
+    getJobStatus(internalId: string): JobStatus | undefined {
+        return this.activeJobs.get(internalId);
+    }
+
+    getJobStatusByExternalId(externalJobId: string): JobStatus | undefined {
+        const internalId = this.externalJobIndex.get(externalJobId);
+        return internalId ? this.activeJobs.get(internalId) : undefined;
+    }
+
+    getAllJobStatuses(): JobStatus[] {
+        return Array.from(this.activeJobs.values());
+    }
+
+    clearJob(internalId: string): boolean {
+        const job = this.activeJobs.get(internalId);
+        if (job) {
+            this.externalJobIndex.delete(job.externalJobId);
+        }
+        return this.activeJobs.delete(internalId);
+    }
+
+    clearJobByExternalId(externalJobId: string): boolean {
+        const internalId = this.externalJobIndex.get(externalJobId);
+        if (internalId) {
+            this.externalJobIndex.delete(externalJobId);
+            return this.activeJobs.delete(internalId);
+        }
+        return false;
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.ts b/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.ts
new file mode 100644
index 0000000000..01bee70470
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/orchestration/backup-orchestration.service.ts
@@ -0,0 +1,534 @@
+import { forwardRef, Inject, Injectable, Logger } from '@nestjs/common';
+import { Readable } from 'stream';
+import { pipeline } from 'stream/promises'; // Using stream.pipeline for better error handling
+
+import { BackupConfigService } from '@app/unraid-api/graph/resolvers/backup/backup-config.service.js';
+import { BackupJobConfig } from '@app/unraid-api/graph/resolvers/backup/backup.model.js';
+import {
+    BackupDestinationProcessor,
+    BackupDestinationProcessorOptions,
+    BackupDestinationResult,
+    StreamingDestinationHandle, // Assuming this will be defined in the interface file
+} from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination-processor.interface.js';
+import { BackupDestinationService } from '@app/unraid-api/graph/resolvers/backup/destination/backup-destination.service.js';
+import {
+    BackupJobStatus,
+    JobStatus,
+} from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js';
+import { BackupJobTrackingService } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-tracking.service.js';
+import {
+    BackupSourceProcessor,
+    BackupSourceProcessorOptions,
+    BackupSourceResult,
+} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
+import { BackupSourceService } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.service.js';
+
+@Injectable()
+export class BackupOrchestrationService {
+    private readonly logger = new Logger(BackupOrchestrationService.name);
+
+    constructor(
+        private readonly jobTrackingService: BackupJobTrackingService,
+        private readonly backupSourceService: BackupSourceService,
+        private readonly backupDestinationService: BackupDestinationService,
+        @Inject(forwardRef(() => BackupConfigService))
+        private readonly backupConfigService: BackupConfigService
+    ) {}
+
+    async executeBackupJob(jobConfig: BackupJobConfig, configId: string): Promise<string> {
+        this.logger.log(
+            `Starting orchestration for backup job: ${jobConfig.name} (Config ID: ${configId})`
+        );
+
+        // Initialize job in tracking service and get the internal tracking object
+        // configId (original jobConfig.id) is used to link tracking to config, jobConfig.name is for display
+        const jobStatus = this.jobTrackingService.initializeJob(configId, jobConfig.name);
+        const internalJobId = jobStatus.id; // This is the actual ID for this specific job run
+
+        // DO NOT call backupConfigService.updateBackupJobConfig here for currentJobId
+        // This will be handled by BackupConfigService itself using the returned internalJobId
+
+        this.emitJobStatus(internalJobId, {
+            status: BackupJobStatus.RUNNING,
+            progress: 0,
+            message: 'Job initializing...',
+        });
+
+        const sourceProcessor = this.backupSourceService.getProcessor(jobConfig.sourceType);
+        const destinationProcessor = this.backupDestinationService.getProcessor(
+            jobConfig.destinationType
+        );
+
+        if (!sourceProcessor || !destinationProcessor) {
+            const errorMsg = 'Failed to initialize backup processors.';
+            this.logger.error(`[Config ID: ${configId}, Job ID: ${internalJobId}] ${errorMsg}`);
+            this.emitJobStatus(internalJobId, {
+                status: BackupJobStatus.FAILED,
+                error: errorMsg,
+            });
+            // Call handleJobCompletion before throwing
+            await this.backupConfigService.handleJobCompletion(
+                configId,
+                BackupJobStatus.FAILED,
+                internalJobId
+            );
+            throw new Error(errorMsg);
+        }
+
+        try {
+            if (sourceProcessor.supportsStreaming && destinationProcessor.supportsStreaming) {
+                await this.executeStreamingBackup(
+                    sourceProcessor,
+                    destinationProcessor,
+                    jobConfig,
+                    internalJobId
+                );
+            } else {
+                await this.executeRegularBackup(
+                    sourceProcessor,
+                    destinationProcessor,
+                    jobConfig,
+                    internalJobId,
+                    configId // Pass configId for handleJobCompletion
+                );
+            }
+            // If executeStreamingBackup/executeRegularBackup complete without throwing, it implies success for those stages.
+            // The final status (COMPLETED/FAILED) is set within those methods via emitJobStatus and then handleJobCompletion.
+        } catch (error) {
+            // Errors from executeStreamingBackup/executeRegularBackup should have already called handleJobCompletion.
+            // This catch is a fallback.
+            this.logger.error(
+                `[Config ID: ${configId}, Job ID: ${internalJobId}] Orchestration error after backup execution attempt: ${(error as Error).message}`
+            );
+            // Ensure completion is handled if not already done by the execution methods
+            // This might be redundant if execution methods are guaranteed to call it.
+            // However, direct throws before or after calling those methods would be caught here.
+            await this.backupConfigService.handleJobCompletion(
+                configId,
+                BackupJobStatus.FAILED,
+                internalJobId
+            );
+            throw error; // Re-throw the error
+        }
+        // DO NOT clear currentJobId here using updateBackupJobConfig. It's handled by handleJobCompletion.
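+        //
+        // Sketch of the ID relationships assumed above (values hypothetical):
+        //   configId      - stable BackupJobConfig.id, reused across runs
+        //   internalJobId - fresh JobStatus.id minted by initializeJob() for this run
+        //   externalJobId - whatever initializeJob() was handed; in this path, configId
+        // handleJobCompletion(configId, status, internalJobId) records the outcome on the
+        // config and clears currentJobId, so nothing else in this method should touch it.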
+
+        this.logger.log(
+            `Finished orchestration logic for backup job: ${jobConfig.name} (Config ID: ${configId}, Job ID: ${internalJobId})`
+        );
+        return internalJobId; // Return the actual job ID for this run
+    }
+
+    private async executeStreamingBackup(
+        sourceProcessor: BackupSourceProcessor,
+        destinationProcessor: BackupDestinationProcessor,
+        jobConfig: BackupJobConfig, // This is the config object, not its ID
+        internalJobId: string
+    ): Promise<void> {
+        this.logger.log(
+            `Executing STREAMING backup for job: ${jobConfig.name} (Internal Job ID: ${internalJobId})`
+        );
+        this.emitJobStatus(internalJobId, {
+            status: BackupJobStatus.RUNNING,
+            progress: 0,
+            message: 'Starting streaming backup...',
+        });
+
+        if (!sourceProcessor.getReadableStream || !destinationProcessor.getWritableStream) {
+            const errorMsg =
+                'Source or destination processor does not support streaming (missing getReadableStream or getWritableStream).';
+            this.logger.error(`[${internalJobId}] ${errorMsg}`);
+            this.emitJobStatus(internalJobId, { status: BackupJobStatus.FAILED, error: errorMsg });
+            // Call handleJobCompletion before throwing (configId first, then status and this run's job ID)
+            await this.backupConfigService.handleJobCompletion(
+                jobConfig.id,
+                BackupJobStatus.FAILED,
+                internalJobId
+            );
+            throw new Error(errorMsg);
+        }
+
+        let sourceStream: Readable | null = null;
+        let destinationStreamHandle: StreamingDestinationHandle | null = null;
+
+        const processorOptions: BackupDestinationProcessorOptions = {
+            jobId: internalJobId,
+            onProgress: (progress: number) => {
+                this.logger.log(`[${internalJobId}] Destination progress: ${progress}%`);
+                this.emitJobStatus(internalJobId, { progress: Math.min(progress, 99) });
+            },
+            onOutput: (data: string) => {
+                this.logger.debug(`[${internalJobId} Dest. Processor Output]: ${data}`);
+            },
+            onError: (errorMsg: string) => {
+                this.logger.warn(`[${internalJobId} Dest. Processor Error]: ${errorMsg}`);
+            },
+        };
+
+        try {
+            this.logger.debug(`[${internalJobId}] Preparing source stream...`);
+            sourceStream = await sourceProcessor.getReadableStream(jobConfig.sourceConfig);
+            this.logger.debug(
+                `[${internalJobId}] Source stream prepared. Preparing destination stream...`
+            );
+            destinationStreamHandle = await destinationProcessor.getWritableStream(
+                jobConfig.destinationConfig,
+                internalJobId,
+                processorOptions
+            );
+            this.logger.debug(`[${internalJobId}] Destination stream prepared. Starting stream pipe.`);
+
+            if (!sourceStream || !destinationStreamHandle?.stream) {
+                throw new Error('Failed to initialize source or destination stream.');
+            }
+
+            let totalBytesProcessed = 0;
+            sourceStream.on('data', (chunk) => {
+                totalBytesProcessed += chunk.length;
+                this.logger.verbose(
+                    `[${internalJobId}] Stream data: ${chunk.length} bytes, Total: ${totalBytesProcessed}`
+                );
+            });
+
+            await pipeline(sourceStream, destinationStreamHandle.stream);
+
+            this.logger.log(
+                `[${internalJobId}] Stream piping completed.
+
+            const destinationResult = await destinationStreamHandle.completionPromise;
+
+            if (!destinationResult.success) {
+                const errorMsg =
+                    destinationResult.error || 'Destination processor failed after streaming.';
+                this.logger.error(`[${internalJobId}] ${errorMsg}`);
+                this.emitJobStatus(internalJobId, { status: BackupJobStatus.FAILED, error: errorMsg });
+                // Record completion before throwing
+                await this.backupConfigService.handleJobCompletion(
+                    jobConfig.id,
+                    BackupJobStatus.FAILED,
+                    internalJobId
+                );
+                throw new Error(errorMsg);
+            }
+
+            this.logger.log(
+                `Streaming backup job ${jobConfig.name} (Internal ID: ${internalJobId}) completed successfully.`
+            );
+            this.emitJobStatus(internalJobId, {
+                status: BackupJobStatus.COMPLETED,
+                progress: 100,
+                message: 'Backup completed successfully.',
+            });
+            // Record completion on success
+            await this.backupConfigService.handleJobCompletion(
+                jobConfig.id,
+                BackupJobStatus.COMPLETED,
+                internalJobId
+            );
+
+            if (sourceProcessor.cleanup) {
+                this.logger.debug(`[${internalJobId}] Performing post-success cleanup for source...`);
+                await sourceProcessor.cleanup({
+                    success: true,
+                    outputPath: 'streamed',
+                    cleanupRequired: false,
+                } as any);
+            }
+            if (destinationProcessor.cleanup) {
+                this.logger.debug(
+                    `[${internalJobId}] Performing post-success cleanup for destination...`
+                );
+                await destinationProcessor.cleanup({ success: true, cleanupRequired: false });
+            }
+        } catch (e) {
+            const error = e as Error;
+            this.logger.error(
+                `Streaming backup job ${jobConfig.name} (Internal ID: ${internalJobId}) failed: ${error.message}`,
+                error.stack
+            );
+
+            this.emitJobStatus(internalJobId, {
+                status: BackupJobStatus.FAILED,
+                error: error.message,
+                message: 'Backup failed during streaming execution.',
+            });
+            // Record completion on failure
+            await this.backupConfigService.handleJobCompletion(
+                jobConfig.id,
+                BackupJobStatus.FAILED,
+                internalJobId
+            );
+
+            this.logger.error(
+                `[${internalJobId}] Performing cleanup due to failure for job ${jobConfig.name}...`
+            );
+            try {
+                if (sourceProcessor.cleanup) {
+                    this.logger.debug(`[${internalJobId}] Cleaning up source processor...`);
+                    await sourceProcessor.cleanup({
+                        success: false,
+                        error: error.message,
+                        cleanupRequired: true,
+                    } as any);
+                }
+            } catch (cleanupError) {
+                this.logger.error(
+                    `[${internalJobId}] Error during source processor cleanup: ${(cleanupError as Error).message}`,
+                    (cleanupError as Error).stack
+                );
+            }
+
+            try {
+                if (destinationProcessor.cleanup) {
+                    this.logger.debug(`[${internalJobId}] Cleaning up destination processor...`);
+                    // Prefer the error reported by the destination's completion promise,
+                    // if it settled with one; otherwise fall back to the pipeline error.
+                    const settledResult = destinationStreamHandle?.completionPromise
+                        ? ((await destinationStreamHandle.completionPromise.catch(
+                              (er) => er
+                          )) as BackupDestinationResult)
+                        : null;
+                    await destinationProcessor.cleanup({
+                        success: false,
+                        error: settledResult?.error || error.message,
+                        cleanupRequired: true,
+                    });
+                }
+            } catch (cleanupError) {
+                this.logger.error(
+                    `[${internalJobId}] Error during destination processor cleanup: ${(cleanupError as Error).message}`,
+                    (cleanupError as Error).stack
+                );
+            }
+
+            throw error;
+        }
+    }
+
+    private async executeRegularBackup(
+        sourceProcessor: BackupSourceProcessor,
+        destinationProcessor: BackupDestinationProcessor,
+        jobConfig: BackupJobConfig, // The config object, not its ID
+        internalJobId: string,
+        configId: string // Pass the configId for handleJobCompletion
+    ): Promise<void> {
+        this.logger.log(
`Executing REGULAR backup for job: ${jobConfig.name} (Config ID: ${configId}, Internal Job ID: ${internalJobId})` + ); + this.emitJobStatus(internalJobId, { + status: BackupJobStatus.RUNNING, + progress: 0, + message: 'Starting regular backup...', + }); + + let sourceResult: BackupSourceResult | null = null; + let destinationResult: BackupDestinationResult | null = null; + + const processorOptions: BackupSourceProcessorOptions & BackupDestinationProcessorOptions = { + jobId: internalJobId, + onProgress: (progressUpdate) => { + const numericProgress = + typeof progressUpdate === 'number' + ? progressUpdate + : (progressUpdate as any).progress; + if (typeof numericProgress === 'number') { + this.emitJobStatus(internalJobId, { progress: numericProgress }); + } + }, + onOutput: (data: string) => { + this.logger.debug(`[${internalJobId} Processor Output]: ${data}`); + }, + onError: (errorMsg: string) => { + this.logger.warn(`[${internalJobId} Processor Error]: ${errorMsg}`); + }, + }; + + try { + this.logger.debug(`[${internalJobId}] Executing source processor...`); + sourceResult = await sourceProcessor.execute(jobConfig.sourceConfig, processorOptions); + this.logger.debug( + `[${internalJobId}] Source processor execution completed. Success: ${sourceResult.success}, OutputPath: ${sourceResult.outputPath}` + ); + + if (!sourceResult.success || !sourceResult.outputPath) { + const errorMsg = + sourceResult.error || 'Source processor failed to produce an output path.'; + this.logger.error(`[${internalJobId}] Source processor failed: ${errorMsg}`); + this.emitJobStatus(internalJobId, { + status: BackupJobStatus.FAILED, + error: errorMsg, + message: 'Source processing failed.', + }); + this.jobTrackingService.updateJobStatus(internalJobId, { + status: BackupJobStatus.FAILED, + error: errorMsg, + }); + // Call handleJobCompletion before throwing + await this.backupConfigService.handleJobCompletion( + configId, + BackupJobStatus.FAILED, + internalJobId + ); + throw new Error(errorMsg); + } + this.emitJobStatus(internalJobId, { + progress: 50, + message: 'Source processing complete. Starting destination processing.', + }); + + this.logger.debug( + `[${internalJobId}] Executing destination processor with source output: ${sourceResult.outputPath}...` + ); + destinationResult = await destinationProcessor.execute( + sourceResult.outputPath, + jobConfig.destinationConfig, + processorOptions + ); + this.logger.debug( + `[${internalJobId}] Destination processor execution completed. 
Success: ${destinationResult.success}` + ); + + if (!destinationResult.success) { + const errorMsg = destinationResult.error || 'Destination processor failed.'; + this.logger.error(`[${internalJobId}] Destination processor failed: ${errorMsg}`); + this.emitJobStatus(internalJobId, { + status: BackupJobStatus.FAILED, + error: errorMsg, + message: 'Destination processing failed.', + }); + this.jobTrackingService.updateJobStatus(internalJobId, { + status: BackupJobStatus.FAILED, + error: errorMsg, + }); + // Call handleJobCompletion before throwing + await this.backupConfigService.handleJobCompletion( + configId, + BackupJobStatus.FAILED, + internalJobId + ); + throw new Error(errorMsg); + } + + this.logger.log( + `Regular backup job ${jobConfig.name} (Internal ID: ${internalJobId}) completed successfully.` + ); + this.emitJobStatus(internalJobId, { + status: BackupJobStatus.COMPLETED, + progress: 100, + message: 'Backup completed successfully.', + }); + // Call handleJobCompletion on success + await this.backupConfigService.handleJobCompletion( + configId, + BackupJobStatus.COMPLETED, + internalJobId + ); + + if (sourceResult && sourceProcessor.cleanup) { + this.logger.debug( + `[${internalJobId}] Performing post-success cleanup for source processor...` + ); + await sourceProcessor.cleanup(sourceResult); + } + if (destinationResult && destinationProcessor.cleanup) { + this.logger.debug( + `[${internalJobId}] Performing post-success cleanup for destination processor...` + ); + await destinationProcessor.cleanup(destinationResult); + } + } catch (e) { + const error = e as Error; + this.logger.error( + `Regular backup job ${jobConfig.name} (Internal ID: ${internalJobId}) failed: ${error.message}`, + error.stack + ); + + this.emitJobStatus(internalJobId, { + status: BackupJobStatus.FAILED, + error: error.message, + message: 'Backup failed during regular execution.', + }); + this.jobTrackingService.updateJobStatus(internalJobId, { + status: BackupJobStatus.FAILED, + error: error.message, + }); + // Call handleJobCompletion on failure + await this.backupConfigService.handleJobCompletion( + configId, + BackupJobStatus.FAILED, + internalJobId + ); + + this.logger.error( + `[${internalJobId}] Performing cleanup due to failure for job ${jobConfig.name}...` + ); + if (sourceResult && sourceProcessor.cleanup) { + try { + this.logger.debug( + `[${internalJobId}] Cleaning up source processor after failure...` + ); + await sourceProcessor.cleanup({ + ...sourceResult, + success: false, + error: sourceResult.error || error.message, + }); + } catch (cleanupError) { + this.logger.error( + `[${internalJobId}] Error during source processor cleanup: ${(cleanupError as Error).message}`, + (cleanupError as Error).stack + ); + } + } + + if (destinationResult && destinationProcessor.cleanup) { + try { + this.logger.debug( + `[${internalJobId}] Cleaning up destination processor after failure...` + ); + await destinationProcessor.cleanup({ + ...destinationResult, + success: false, + error: destinationResult.error || error.message, + }); + } catch (cleanupError) { + this.logger.error( + `[${internalJobId}] Error during destination processor cleanup: ${(cleanupError as Error).message}`, + (cleanupError as Error).stack + ); + } + } else if (sourceResult?.success && destinationProcessor.cleanup) { + try { + this.logger.debug( + `[${internalJobId}] Cleaning up destination processor after a failure (destinationResult not available)...` + ); + await destinationProcessor.cleanup({ + success: false, + error: error.message, + 
cleanupRequired: true,
+                    });
+                } catch (cleanupError) {
+                    this.logger.error(
+                        `[${internalJobId}] Error during destination processor cleanup (no result): ${(cleanupError as Error).message}`,
+                        (cleanupError as Error).stack
+                    );
+                }
+            }
+            throw error;
+        }
+    }
+
+    private emitJobStatus(
+        internalJobId: string,
+        statusUpdate: {
+            status?: BackupJobStatus;
+            progress?: number;
+            message?: string;
+            error?: string;
+        }
+    ): void {
+        this.logger.log(
+            `[Job Status Update - ${internalJobId}]: Status: ${statusUpdate.status}, Progress: ${statusUpdate.progress}, Msg: ${statusUpdate.message}, Err: ${statusUpdate.error}`
+        );
+
+        const updatePayload: Partial<JobStatus> = {
+            ...statusUpdate,
+        };
+        this.jobTrackingService.updateJobStatus(internalJobId, updatePayload);
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/backup-source-jsonforms.config.ts b/api/src/unraid-api/graph/resolvers/backup/source/backup-source-jsonforms.config.ts
new file mode 100644
index 0000000000..866b3425cd
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/backup-source-jsonforms.config.ts
@@ -0,0 +1,503 @@
+import type { LabelElement, SchemaBasedCondition } from '@jsonforms/core';
+import { JsonSchema7, RuleEffect } from '@jsonforms/core';
+
+import type { DataSlice, SettingSlice, UIElement } from '@app/unraid-api/types/json-forms.js';
+import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
+import { createLabeledControl } from '@app/unraid-api/graph/utils/form-utils.js';
+
+export function getSourceConfigSlice(): SettingSlice {
+    const sourceConfigElements: UIElement[] = [
+        {
+            type: 'Control',
+            scope: '#/properties/sourceConfig/properties/type',
+            options: {
+                format: 'radio',
+                radioLayout: 'horizontal',
+                options: [
+                    {
+                        label: 'ZFS Snapshot',
+                        value: SourceType.ZFS,
+                        description: 'Create ZFS snapshot and backup',
+                    },
+                    {
+                        label: 'Flash Drive',
+                        value: SourceType.FLASH,
+                        description: 'Backup flash drive contents',
+                    },
+                    {
+                        label: 'Custom Script',
+                        value: SourceType.SCRIPT,
+                        description: 'Run custom script to generate backup data',
+                    },
+                    {
+                        label: 'Raw Files',
+                        value: SourceType.RAW,
+                        description: 'Direct file backup without preprocessing',
+                    },
+                ],
+            },
+        },
+
+        createLabeledControl({
+            scope: '#/properties/sourceConfig/properties/timeout',
+            label: 'Timeout',
+            description: 'Timeout in seconds for backup operation',
+            controlOptions: {
+                placeholder: '3600',
+                format: 'number',
+            },
+        }),
+
+        createLabeledControl({
+            scope: '#/properties/sourceConfig/properties/cleanupOnFailure',
+            label: 'Cleanup on Failure',
+            description: 'Clean up backup artifacts on failure',
+            controlOptions: {
+                format: 'toggle',
+            },
+        }),
+
+        // Raw Backup Configuration
+        {
+            type: 'VerticalLayout',
+            rule: {
+                effect: RuleEffect.SHOW,
+                condition: {
+                    scope: '#/properties/sourceConfig/properties/type',
+                    schema: { const: SourceType.RAW },
+                } as SchemaBasedCondition,
+            },
+            elements: [
+                {
+                    type: 'Label',
+                    text: 'Raw Backup Configuration',
+                    options: {
+                        description: 'Configure direct file/folder backup settings.',
+                    },
+                } as LabelElement,
+
+                createLabeledControl({
+                    scope: '#/properties/sourceConfig/properties/rawConfig/properties/sourcePath',
+                    label: 'Source Path',
+                    description: 'Source path to backup',
+                    controlOptions: {
+                        placeholder: '/mnt/user/data',
+                    },
+                }),
+
+                createLabeledControl({
+                    scope: '#/properties/sourceConfig/properties/rawConfig/properties/excludePatterns',
+                    label: 'Exclude Patterns',
+                    description: 'Patterns to exclude
from backup', + controlOptions: { + placeholder: '*.tmp,*.log', + }, + }), + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/rawConfig/properties/includePatterns', + label: 'Include Patterns', + description: 'Patterns to include in backup', + controlOptions: { + placeholder: '*.txt,*.doc', + }, + }), + ], + }, + + // ZFS Configuration + { + type: 'VerticalLayout', + rule: { + effect: RuleEffect.SHOW, + condition: { + scope: '#/properties/sourceConfig/properties/type', + schema: { const: SourceType.ZFS }, + } as SchemaBasedCondition, + }, + elements: [ + { + type: 'Label', + text: 'ZFS Configuration', + options: { + description: 'Configure ZFS snapshot settings for backup.', + }, + } as LabelElement, + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/zfsConfig/properties/poolName', + label: 'Pool Name', + description: 'ZFS pool name', + controlOptions: { + placeholder: 'tank', + }, + }), + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/zfsConfig/properties/datasetName', + label: 'Dataset Name', + description: 'ZFS dataset name', + controlOptions: { + placeholder: 'data', + }, + }), + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/zfsConfig/properties/snapshotPrefix', + label: 'Snapshot Prefix', + description: 'Prefix for snapshot names', + controlOptions: { + placeholder: 'backup', + }, + }), + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/zfsConfig/properties/cleanupSnapshots', + label: 'Cleanup Snapshots', + description: 'Clean up snapshots after backup', + controlOptions: { + format: 'checkbox', + }, + }), + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/zfsConfig/properties/retainSnapshots', + label: 'Retain Snapshots', + description: 'Number of snapshots to retain', + controlOptions: { + placeholder: '5', + format: 'number', + }, + }), + ], + }, + + // Flash Configuration + { + type: 'VerticalLayout', + rule: { + effect: RuleEffect.SHOW, + condition: { + scope: '#/properties/sourceConfig/properties/type', + schema: { const: SourceType.FLASH }, + } as SchemaBasedCondition, + }, + elements: [ + { + type: 'Label', + text: 'Flash Backup Configuration', + options: { + description: 'Configure Unraid flash drive backup settings.', + }, + } as LabelElement, + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/flashConfig/properties/flashPath', + label: 'Flash Path', + description: 'Path to flash drive', + controlOptions: { + placeholder: '/boot', + }, + }), + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/flashConfig/properties/includeGitHistory', + label: 'Include Git History', + description: 'Include git history in backup', + controlOptions: { + format: 'checkbox', + }, + }), + + createLabeledControl({ + scope: '#/properties/sourceConfig/properties/flashConfig/properties/additionalPaths', + label: 'Additional Paths', + description: 'Additional paths to include', + controlOptions: { + placeholder: '/etc/config', + }, + }), + ], + }, + + // Script Configuration + { + type: 'VerticalLayout', + rule: { + effect: RuleEffect.SHOW, + condition: { + scope: '#/properties/sourceConfig/properties/type', + schema: { const: SourceType.SCRIPT }, + } as SchemaBasedCondition, + }, + elements: [ + { + type: 'Label', + text: 'Custom Script Configuration', + options: { + description: 'Configure custom script execution settings.', + }, + } as LabelElement, + + createLabeledControl({ + scope: 
'#/properties/sourceConfig/properties/scriptConfig/properties/scriptPath',
+                    label: 'Script Path',
+                    description: 'Path to script file',
+                    controlOptions: {
+                        placeholder: '/usr/local/bin/backup.sh',
+                    },
+                }),
+
+                createLabeledControl({
+                    scope: '#/properties/sourceConfig/properties/scriptConfig/properties/scriptArgs',
+                    label: 'Script Arguments',
+                    description: 'Arguments for script',
+                    controlOptions: {
+                        placeholder: '--verbose --compress',
+                    },
+                }),
+
+                createLabeledControl({
+                    scope: '#/properties/sourceConfig/properties/scriptConfig/properties/workingDirectory',
+                    label: 'Working Directory',
+                    description: 'Working directory for script',
+                    controlOptions: {
+                        placeholder: '/tmp',
+                    },
+                }),
+
+                createLabeledControl({
+                    scope: '#/properties/sourceConfig/properties/scriptConfig/properties/outputPath',
+                    label: 'Output Path',
+                    description: 'Path for script output',
+                    controlOptions: {
+                        placeholder: '/tmp/backup.tar.gz',
+                    },
+                }),
+            ],
+        },
+    ];
+
+    const sourceConfigProperties: Record<string, JsonSchema7> = {
+        sourceConfig: {
+            type: 'object',
+            title: 'Source Configuration',
+            description: 'Configuration for backup source',
+            properties: {
+                type: {
+                    type: 'string',
+                    title: 'Backup Type',
+                    description: 'Type of backup to perform',
+                    enum: [SourceType.ZFS, SourceType.FLASH, SourceType.SCRIPT, SourceType.RAW],
+                    default: SourceType.ZFS,
+                },
+                timeout: {
+                    type: 'integer',
+                    title: 'Timeout',
+                    description: 'Timeout in seconds for backup operation',
+                    minimum: 30,
+                    maximum: 86400,
+                    default: 3600,
+                },
+                cleanupOnFailure: {
+                    type: 'boolean',
+                    title: 'Cleanup on Failure',
+                    description: 'Clean up backup artifacts on failure',
+                    default: true,
+                },
+                rawConfig: {
+                    type: 'object',
+                    title: 'Raw Backup Configuration',
+                    properties: {
+                        sourcePath: {
+                            type: 'string',
+                            title: 'Source Path',
+                            description: 'Source path to backup',
+                            minLength: 1,
+                        },
+                        excludePatterns: {
+                            type: 'array',
+                            title: 'Exclude Patterns',
+                            description: 'Patterns to exclude from backup',
+                            items: {
+                                type: 'string',
+                            },
+                            default: [],
+                        },
+                        includePatterns: {
+                            type: 'array',
+                            title: 'Include Patterns',
+                            description: 'Patterns to include in backup',
+                            items: {
+                                type: 'string',
+                            },
+                            default: [],
+                        },
+                    },
+                    required: ['sourcePath'],
+                },
+                zfsConfig: {
+                    type: 'object',
+                    title: 'ZFS Configuration',
+                    properties: {
+                        poolName: {
+                            type: 'string',
+                            title: 'Pool Name',
+                            description: 'ZFS pool name',
+                            minLength: 1,
+                        },
+                        datasetName: {
+                            type: 'string',
+                            title: 'Dataset Name',
+                            description: 'ZFS dataset name',
+                            minLength: 1,
+                        },
+                        snapshotPrefix: {
+                            type: 'string',
+                            title: 'Snapshot Prefix',
+                            description: 'Prefix for snapshot names',
+                            default: 'backup',
+                        },
+                        cleanupSnapshots: {
+                            type: 'boolean',
+                            title: 'Cleanup Snapshots',
+                            description: 'Clean up snapshots after backup',
+                            default: true,
+                        },
+                        retainSnapshots: {
+                            type: 'integer',
+                            title: 'Retain Snapshots',
+                            description: 'Number of snapshots to retain',
+                            minimum: 0,
+                            default: 5,
+                        },
+                    },
+                    required: ['poolName', 'datasetName'],
+                },
+                flashConfig: {
+                    type: 'object',
+                    title: 'Flash Configuration',
+                    properties: {
+                        flashPath: {
+                            type: 'string',
+                            title: 'Flash Path',
+                            description: 'Path to flash drive',
+                            default: '/boot',
+                        },
+                        includeGitHistory: {
+                            type: 'boolean',
+                            title: 'Include Git History',
+                            description: 'Include git history in backup',
+                            default: true,
+                        },
+                        additionalPaths: {
+                            type: 'array',
+                            title: 'Additional Paths',
+                            description: 'Additional paths to include',
+                            items: {
+                                type: 'string',
+                            },
+                            default: [],
+                        },
+                    },
+                },
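+                // Illustrative payload accepted by this schema (RAW variant); the
+                // allOf blocks applied further down forbid the other *Config objects:
+                //   {
+                //       "type": "RAW",
+                //       "timeout": 3600,
+                //       "cleanupOnFailure": true,
+                //       "rawConfig": { "sourcePath": "/mnt/user/data", "excludePatterns": ["*.tmp"] }
+                //   }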
+                scriptConfig: {
+                    type: 'object',
+                    title: 'Script Configuration',
+                    properties: {
+                        scriptPath: {
+                            type: 'string',
+                            title: 'Script Path',
+                            description: 'Path to script file',
+                            minLength: 1,
+                        },
+                        scriptArgs: {
+                            type: 'array',
+                            title: 'Script Arguments',
+                            description: 'Arguments for script',
+                            items: {
+                                type: 'string',
+                            },
+                            default: [],
+                        },
+                        workingDirectory: {
+                            type: 'string',
+                            title: 'Working Directory',
+                            description: 'Working directory for script',
+                            default: '/tmp',
+                        },
+                        outputPath: {
+                            type: 'string',
+                            title: 'Output Path',
+                            description: 'Path for script output',
+                            minLength: 1,
+                        },
+                    },
+                    required: ['scriptPath', 'outputPath'],
+                },
+            },
+            required: ['type'],
+        },
+    };
+
+    // Apply conditional logic for sourceConfig
+    if (sourceConfigProperties.sourceConfig && typeof sourceConfigProperties.sourceConfig === 'object') {
+        sourceConfigProperties.sourceConfig.allOf = [
+            {
+                if: { properties: { type: { const: SourceType.RAW } }, required: ['type'] },
+                then: {
+                    required: ['rawConfig'],
+                    properties: {
+                        zfsConfig: { not: {} },
+                        flashConfig: { not: {} },
+                        scriptConfig: { not: {} },
+                    },
+                },
+            },
+            {
+                if: { properties: { type: { const: SourceType.ZFS } }, required: ['type'] },
+                then: {
+                    required: ['zfsConfig'],
+                    properties: {
+                        rawConfig: { not: {} },
+                        flashConfig: { not: {} },
+                        scriptConfig: { not: {} },
+                    },
+                },
+            },
+            {
+                if: { properties: { type: { const: SourceType.FLASH } }, required: ['type'] },
+                then: {
+                    required: ['flashConfig'],
+                    properties: {
+                        rawConfig: { not: {} },
+                        zfsConfig: { not: {} },
+                        scriptConfig: { not: {} },
+                    },
+                },
+            },
+            {
+                if: { properties: { type: { const: SourceType.SCRIPT } }, required: ['type'] },
+                then: {
+                    required: ['scriptConfig'],
+                    properties: {
+                        rawConfig: { not: {} },
+                        zfsConfig: { not: {} },
+                        flashConfig: { not: {} },
+                    },
+                },
+            },
+        ];
+    }
+
+    const verticalLayoutElement: UIElement = {
+        type: 'VerticalLayout',
+        elements: sourceConfigElements,
+        options: { step: 1 },
+    };
+
+    return {
+        properties: sourceConfigProperties,
+        elements: [verticalLayoutElement],
+    };
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.ts b/api/src/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.ts
new file mode 100644
index 0000000000..5decc148af
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.ts
@@ -0,0 +1,53 @@
+import { Readable } from 'stream';
+
+import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
+
+export interface BackupSourceConfig {
+    timeout: number;
+    cleanupOnFailure: boolean;
+}
+
+export interface BackupSourceResult {
+    success: boolean;
+    outputPath?: string;
+    streamPath?: string;
+    snapshotName?: string;
+    error?: string;
+    cleanupRequired?: boolean;
+    metadata?: Record<string, unknown>;
+
+    // Streaming support
+    streamCommand?: string;
+    streamArgs?: string[];
+    supportsStreaming?: boolean;
+    isStreamingMode?: boolean;
+}
+
+export interface BackupSourceProcessorOptions {
+    jobId?: string;
+    onProgress?: (progress: number) => void;
+    onOutput?: (data: string) => void;
+    onError?: (error: string) => void;
+    useStreaming?: boolean;
+}
+
+export abstract class BackupSourceProcessor<TConfig extends BackupSourceConfig = BackupSourceConfig> {
+    abstract readonly sourceType: SourceType;
+
+    abstract execute(
+        config: TConfig,
+        options?: BackupSourceProcessorOptions
+    ): Promise<BackupSourceResult>;
+
+    abstract validate(config: TConfig): Promise<{ valid: boolean; error?: string; warnings?: string[] }>;
+
+    abstract cleanup(result: BackupSourceResult): Promise<void>;
+
+    // Getter to check if processor supports streaming
+    abstract get supportsStreaming(): boolean;
+
+    // Optional getter to get a readable stream for streaming backups
+    get getReadableStream(): ((config: TConfig) => Promise<Readable>) | undefined {
+        return undefined;
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/backup-source.module.ts b/api/src/unraid-api/graph/resolvers/backup/source/backup-source.module.ts
new file mode 100644
index 0000000000..f0c3c821c5
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/backup-source.module.ts
@@ -0,0 +1,30 @@
+import { Module } from '@nestjs/common';
+
+import { BackupSourceService } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.service.js';
+import { FlashSourceProcessor } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source-processor.service.js';
+import { FlashValidationService } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-validation.service.js';
+import { RawSourceProcessor } from '@app/unraid-api/graph/resolvers/backup/source/raw/raw-source-processor.service.js';
+import { ScriptSourceProcessor } from '@app/unraid-api/graph/resolvers/backup/source/script/script-source-processor.service.js';
+import { ZfsSourceProcessor } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source-processor.service.js';
+import { ZfsValidationService } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-validation.service.js';
+
+@Module({
+    providers: [
+        BackupSourceService,
+        FlashSourceProcessor,
+        FlashValidationService,
+        RawSourceProcessor,
+        ScriptSourceProcessor,
+        ZfsSourceProcessor,
+        ZfsValidationService,
+    ],
+    exports: [
+        BackupSourceService,
+        FlashSourceProcessor,
+        RawSourceProcessor,
+        ScriptSourceProcessor,
+        ZfsSourceProcessor,
+        ZfsValidationService,
+    ],
+})
+export class BackupSourceModule {}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/backup-source.service.ts b/api/src/unraid-api/graph/resolvers/backup/source/backup-source.service.ts
new file mode 100644
index 0000000000..0554053248
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/backup-source.service.ts
@@ -0,0 +1,99 @@
+import { BadRequestException, Injectable, Logger } from '@nestjs/common';
+import { EventEmitter } from 'events';
+
+import { v4 as uuidv4 } from 'uuid';
+
+import {
+    BackupSourceConfig,
+    BackupSourceProcessor,
+    BackupSourceProcessorOptions,
+    BackupSourceResult,
+} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
+import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
+import {
+    FlashSourceConfig,
+    FlashSourceProcessor,
+} from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source-processor.service.js';
+import {
+    RawSourceConfig,
+    RawSourceProcessor,
+} from '@app/unraid-api/graph/resolvers/backup/source/raw/raw-source-processor.service.js';
+import {
+    ScriptSourceConfig,
+    ScriptSourceProcessor,
+} from '@app/unraid-api/graph/resolvers/backup/source/script/script-source-processor.service.js';
+import {
+    ZfsSourceConfig,
+    ZfsSourceProcessor,
+} from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source-processor.service.js';
+
+export interface BackupSourceOptions {
+    jobId?: string;
+    onProgress?: (progress: number) => void;
+    onOutput?: (data: string) => void;
+    onError?: (error: string) => void;
+}
+
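+// Typical call into this service (illustrative sketch only; processor wiring comes
+// from BackupSourceModule, and the config must carry a SourceType discriminator):
+//
+//   const result = await backupSourceService.processSource(
+//       { type: SourceType.RAW, timeout: 3600, cleanupOnFailure: true, sourcePath: '/mnt/user/data' },
+//       { onProgress: (p) => console.log(`source: ${p}%`) }
+//   );
+//   if (!result.success) throw new Error(result.error);
+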
+@Injectable()
+export class BackupSourceService extends EventEmitter {
+    private readonly logger = new Logger(BackupSourceService.name);
+
+    constructor(
+        private readonly flashSourceProcessor: FlashSourceProcessor,
+        private readonly rawSourceProcessor: RawSourceProcessor,
+        private readonly scriptSourceProcessor: ScriptSourceProcessor,
+        private readonly zfsSourceProcessor: ZfsSourceProcessor
+    ) {
+        super();
+    }
+
+    async processSource<T extends BackupSourceConfig & { type: SourceType }>(
+        config: T,
+        options?: BackupSourceOptions
+    ): Promise<BackupSourceResult> {
+        const processor = this.getProcessor(config.type);
+        if (!processor) {
+            throw new BadRequestException(`Unsupported source type: ${config.type}`);
+        }
+
+        const processorOptions: BackupSourceProcessorOptions = {
+            jobId: options?.jobId || uuidv4(),
+            onProgress: options?.onProgress,
+            onOutput: options?.onOutput,
+            onError: options?.onError,
+        };
+
+        try {
+            const result = await processor.execute(config, processorOptions);
+            this.logger.log(`Source processing completed for type: ${config.type}`);
+            return result;
+        } catch (error) {
+            this.logger.error(`Source processing failed for type: ${config.type}`, error);
+            throw error;
+        }
+    }
+
+    async cancelSourceJob(jobId: string): Promise<boolean> {
+        this.logger.log(`Attempting to cancel source job: ${jobId}`);
+        return false;
+    }
+
+    async cleanup(): Promise<void> {
+        this.logger.log('Cleaning up source service...');
+    }
+
+    public getProcessor(type: SourceType): BackupSourceProcessor | null {
+        switch (type) {
+            case SourceType.FLASH:
+                return this.flashSourceProcessor;
+            case SourceType.RAW:
+                return this.rawSourceProcessor;
+            case SourceType.SCRIPT:
+                return this.scriptSourceProcessor;
+            case SourceType.ZFS:
+                return this.zfsSourceProcessor;
+            default:
+                return null;
+        }
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/backup-source.types.ts b/api/src/unraid-api/graph/resolvers/backup/source/backup-source.types.ts
new file mode 100644
index 0000000000..310e569a05
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/backup-source.types.ts
@@ -0,0 +1,136 @@
+import { createUnionType, Field, InputType, ObjectType, registerEnumType } from '@nestjs/graphql';
+
+import { Type } from 'class-transformer';
+import { IsBoolean, IsEnum, IsNumber, IsOptional, Min, ValidateNested } from 'class-validator';
+
+import {
+    FlashPreprocessConfig,
+    FlashPreprocessConfigInput,
+} from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.js';
+import {
+    RawBackupConfig,
+    RawBackupConfigInput,
+} from '@app/unraid-api/graph/resolvers/backup/source/raw/raw-source.types.js';
+import {
+    ScriptPreprocessConfig,
+    ScriptPreprocessConfigInput,
+} from '@app/unraid-api/graph/resolvers/backup/source/script/script-source.types.js';
+import {
+    ZfsPreprocessConfig,
+    ZfsPreprocessConfigInput,
+} from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.js';
+
+export enum SourceType {
+    ZFS = 'ZFS',
+    FLASH = 'FLASH',
+    SCRIPT = 'SCRIPT',
+    RAW = 'RAW',
+}
+
+registerEnumType(SourceType, {
+    name: 'SourceType',
+    description:
+        'Type of backup to perform (ZFS snapshot, Flash backup, Custom script, or Raw file backup)',
+});
+
+export { ZfsPreprocessConfigInput, ZfsPreprocessConfig };
+export { FlashPreprocessConfigInput, FlashPreprocessConfig };
+export { ScriptPreprocessConfigInput, ScriptPreprocessConfig };
+export { RawBackupConfigInput, RawBackupConfig };
+
+@InputType()
+export class SourceConfigInput {
+    @Field(() => SourceType, { nullable: false })
+    @IsEnum(SourceType, { message: 'Invalid source type' })
+    type!: SourceType;
+
+    @Field(() => Number, { description: 'Timeout for backup operation in seconds', defaultValue: 3600 })
+    @IsOptional()
+    @IsNumber()
+    @Min(1)
+    timeout?: number;
+
+    @Field(() => Boolean, { description: 'Whether to cleanup on failure', defaultValue: true })
+    @IsOptional()
+    @IsBoolean()
+    cleanupOnFailure?: boolean;
+
+    @Field(() => ZfsPreprocessConfigInput, { nullable: true })
+    @IsOptional()
+    @ValidateNested()
+    @Type(() => ZfsPreprocessConfigInput)
+    zfsConfig?: ZfsPreprocessConfigInput;
+
+    @Field(() => FlashPreprocessConfigInput, { nullable: true })
+    @IsOptional()
+    @ValidateNested()
+    @Type(() => FlashPreprocessConfigInput)
+    flashConfig?: FlashPreprocessConfigInput;
+
+    @Field(() => ScriptPreprocessConfigInput, { nullable: true })
+    @IsOptional()
+    @ValidateNested()
+    @Type(() => ScriptPreprocessConfigInput)
+    scriptConfig?: ScriptPreprocessConfigInput;
+
+    @Field(() => RawBackupConfigInput, { nullable: true })
+    @IsOptional()
+    @ValidateNested()
+    @Type(() => RawBackupConfigInput)
+    rawConfig?: RawBackupConfigInput;
+}
+
+@ObjectType()
+export class SourceConfig {
+    @Field(() => Number)
+    timeout!: number;
+
+    @Field(() => Boolean)
+    cleanupOnFailure!: boolean;
+
+    @Field(() => ZfsPreprocessConfig, { nullable: true })
+    zfsConfig?: ZfsPreprocessConfig;
+
+    @Field(() => FlashPreprocessConfig, { nullable: true })
+    flashConfig?: FlashPreprocessConfig;
+
+    @Field(() => ScriptPreprocessConfig, { nullable: true })
+    scriptConfig?: ScriptPreprocessConfig;
+
+    @Field(() => RawBackupConfig, { nullable: true })
+    rawConfig?: RawBackupConfig;
+}
+
+export const SourceConfigUnion = createUnionType({
+    name: 'SourceConfigUnion',
+    types: () =>
+        [ZfsPreprocessConfig, FlashPreprocessConfig, ScriptPreprocessConfig, RawBackupConfig] as const,
+    resolveType(obj: any, context, info) {
+        if (ZfsPreprocessConfig.isTypeOf && ZfsPreprocessConfig.isTypeOf(obj)) {
+            return ZfsPreprocessConfig;
+        }
+        if (FlashPreprocessConfig.isTypeOf && FlashPreprocessConfig.isTypeOf(obj)) {
+            return FlashPreprocessConfig;
+        }
+        if (ScriptPreprocessConfig.isTypeOf && ScriptPreprocessConfig.isTypeOf(obj)) {
+            return ScriptPreprocessConfig;
+        }
+        if (RawBackupConfig.isTypeOf && RawBackupConfig.isTypeOf(obj)) {
+            return RawBackupConfig;
+        }
+        console.error(`[SourceConfigUnion] Could not resolve type for object: ${JSON.stringify(obj)}`);
+        return null;
+    },
+});
+
+export const SourceConfigInputUnion = SourceConfigInput;
+
+export interface PreprocessResult {
+    success: boolean;
+    streamPath?: string;
+    outputPath?: string;
+    snapshotName?: string;
+    error?: string;
+    cleanupRequired?: boolean;
+    metadata?: Record<string, unknown>;
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/base-source.types.ts b/api/src/unraid-api/graph/resolvers/backup/source/base-source.types.ts
new file mode 100644
index 0000000000..f9f8e13719
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/base-source.types.ts
@@ -0,0 +1,18 @@
+import { Field, InputType, ObjectType } from '@nestjs/graphql';
+
+import { IsOptional, IsString } from 'class-validator';
+
+@InputType()
+export abstract class BaseSourceConfigInput {
+    @Field(() => String, {
+        description: 'Human-readable label for this source configuration',
+        nullable: true,
+    })
+    @IsOptional()
+    @IsString()
+    label?: string;
+}
+
+export interface BaseSourceConfig {
+    label: string;
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-source-processor.service.ts b/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-source-processor.service.ts
new file mode 100644
index 0000000000..01f7204c63
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-source-processor.service.ts
@@ -0,0 +1,307 @@
+import { Injectable, Logger } from '@nestjs/common';
+import { access, mkdir, writeFile } from 'fs/promises';
+import { dirname, join } from 'path';
+import { Readable } from 'stream';
+
+import { execa } from 'execa';
+
+import {
+    BackupSourceConfig,
+    BackupSourceProcessor,
+    BackupSourceProcessorOptions,
+    BackupSourceResult,
+} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
+import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
+import { FlashPreprocessConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.js';
+import { FlashValidationService } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-validation.service.js';
+
+export interface FlashSourceConfig extends BackupSourceConfig {
+    flashPath: string;
+    includeGitHistory: boolean;
+    additionalPaths?: string[];
+}
+
+@Injectable()
+export class FlashSourceProcessor extends BackupSourceProcessor<FlashSourceConfig> {
+    readonly sourceType = SourceType.FLASH;
+    private readonly logger = new Logger(FlashSourceProcessor.name);
+
+    constructor(private readonly flashValidationService: FlashValidationService) {
+        super();
+    }
+
+    async execute(
+        config: FlashSourceConfig,
+        options?: BackupSourceProcessorOptions
+    ): Promise<BackupSourceResult> {
+        const validation = await this.validate(config);
+        if (!validation.valid) {
+            return {
+                success: false,
+                error: `Flash configuration validation failed: ${validation.error}`,
+                metadata: { validationError: validation.error, validationWarnings: validation.warnings },
+            };
+        }
+
+        if (validation.warnings?.length) {
+            this.logger.warn(`Flash backup warnings: ${validation.warnings.join(', ')}`);
+        }
+
+        const tempGitPath = join(config.flashPath, '.git-backup-temp');
+        let gitRepoInitialized = false;
+
+        try {
+            if (config.includeGitHistory) {
+                gitRepoInitialized = await this.initializeGitRepository(config.flashPath, tempGitPath);
+                if (gitRepoInitialized) {
+                    this.logger.log(`Initialized git repository for Flash backup at: ${tempGitPath}`);
+                }
+            }
+
+            // Generate streaming command for tar compression
+            const streamCommand = this.generateStreamCommand(config, gitRepoInitialized, tempGitPath);
+
+            return {
+                success: true,
+                outputPath: config.flashPath,
+                streamPath: config.flashPath,
+                metadata: {
+                    flashPath: config.flashPath,
+                    gitHistoryIncluded: config.includeGitHistory && gitRepoInitialized,
+                    additionalPaths: config.additionalPaths,
+                    validationWarnings: validation.warnings,
+                    tempGitPath: gitRepoInitialized ? tempGitPath : undefined,
+                    streamCommand: streamCommand.command,
+                    streamArgs: streamCommand.args,
+                    sourceType: this.sourceType,
+                },
+                cleanupRequired: gitRepoInitialized,
+            };
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            this.logger.error(`Flash backup failed: ${errorMessage}`, error);
+
+            if (gitRepoInitialized) {
+                try {
+                    await this.cleanupTempGitRepo(tempGitPath);
+                    this.logger.log(`Cleaned up temporary git repository after failure: ${tempGitPath}`);
+                } catch (cleanupError) {
+                    const cleanupErrorMessage =
+                        cleanupError instanceof Error ? cleanupError.message : String(cleanupError);
+                    this.logger.error(
+                        `Failed to cleanup temporary git repository: ${cleanupErrorMessage}`
+                    );
+                }
+            }
+
+            return {
+                success: false,
+                error: errorMessage,
+                cleanupRequired: gitRepoInitialized,
+                metadata: {
+                    flashPath: config.flashPath,
+                    gitRepoInitialized,
+                    cleanupAttempted: gitRepoInitialized,
+                },
+            };
+        }
+    }
+
+    async validate(
+        config: FlashSourceConfig
+    ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> {
+        const legacyConfig: FlashPreprocessConfigInput = {
+            flashPath: config.flashPath,
+            includeGitHistory: config.includeGitHistory,
+            additionalPaths: config.additionalPaths,
+        };
+
+        const validationResult = await this.flashValidationService.validateFlashConfig(legacyConfig);
+
+        return {
+            valid: validationResult.isValid,
+            error: validationResult.errors.length > 0 ? validationResult.errors.join(', ') : undefined,
+            warnings: validationResult.warnings,
+        };
+    }
+
+    async cleanup(result: BackupSourceResult): Promise<void> {
+        if (result.cleanupRequired && result.metadata?.tempGitPath) {
+            await this.cleanupTempGitRepo(result.metadata.tempGitPath as string);
+        }
+    }
+
+    private async initializeGitRepository(flashPath: string, tempGitPath: string): Promise<boolean> {
+        try {
+            const existingGitPath = join(flashPath, '.git');
+            const hasExistingRepo = await this.flashValidationService.validateGitRepository(flashPath);
+
+            if (hasExistingRepo) {
+                await execa('cp', ['-r', existingGitPath, tempGitPath]);
+                this.logger.log('Copied existing git repository to temporary location');
+                return true;
+            }
+
+            await mkdir(tempGitPath, { recursive: true });
+            // Initialize the repository in the flash path itself (not in tempGitPath) so
+            // the add/commit below have a repository to operate on; the .git directory
+            // is relocated into tempGitPath afterwards.
+            await execa('git', ['init'], { cwd: flashPath });
+
+            // Write the ignore rules where git will actually read them during `git add`.
+            const gitignorePath = join(flashPath, '.gitignore');
+            const gitignoreContent = [
+                '# Exclude sensitive files',
+                '*.key',
+                '*.pem',
+                '*.p12',
+                '*.pfx',
+                'config/passwd',
+                'config/shadow',
+                'config/ssh/',
+                'config/ssl/',
+                'config/wireguard/',
+                'config/network.cfg',
+                'config/ident.cfg',
+            ].join('\n');
+
+            await writeFile(gitignorePath, gitignoreContent);
+
+            await execa('git', ['add', '.'], { cwd: flashPath });
+            await execa(
+                'git',
+                [
+                    '-c',
+                    'user.name=Unraid Backup',
+                    '-c',
+                    'user.email=backup@unraid.net',
+                    'commit',
+                    '-m',
+                    'Flash backup snapshot',
+                ],
+                { cwd: flashPath }
+            );
+
+            await execa('mv', [join(flashPath, '.git'), tempGitPath]);
+
+            this.logger.log('Initialized new git repository for Flash backup');
+            return true;
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            this.logger.warn(`Failed to initialize git repository: ${errorMessage}`);
+            return false;
+        }
+    }
+
+    private async cleanupTempGitRepo(tempGitPath: string): Promise<void> {
+        try {
+            await execa('rm', ['-rf', tempGitPath]);
+            this.logger.log(`Cleaned up temporary git repository: ${tempGitPath}`);
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            this.logger.error(`Failed to cleanup temporary git repository: ${errorMessage}`);
+        }
+    }
+
+    private generateStreamCommand(
+        config: FlashSourceConfig,
+        gitRepoInitialized: boolean,
+        tempGitPath?: string
+    ): { command: string; args: string[] } {
+        const excludeArgs: string[] = [];
+
+        // Standard exclusions for flash backups
+        const standardExcludes = ['lost+found', '*.tmp', '*.temp', '.DS_Store', 'Thumbs.db'];
+
+        standardExcludes.forEach((pattern) => {
+            excludeArgs.push('--exclude', pattern);
+        });
+
+        // Exclude the temporary git staging directory itself; its relocated .git
+        // metadata is handled separately and should not be swept into the archive.
+        if (gitRepoInitialized && tempGitPath) {
+            excludeArgs.push('--exclude', '.git-backup-temp');
+        }
+
+        const tarArgs = [
+            '-czf', // create, gzip, file
+            '-', // output to stdout for streaming
+            '-C', // change to directory
+            config.flashPath,
+            ...excludeArgs,
+            '.', // backup everything in the directory
+        ];
+
+        // Add additional paths if specified
+        if (config.additionalPaths?.length) {
+            config.additionalPaths.forEach((path) => {
+                tarArgs.push('-C', path, '.');
+            });
+        }
+
+        return {
+            command: 'tar',
+            args: tarArgs,
+        };
+    }
+
+    get supportsStreaming(): boolean {
+        return true;
+    }
+
+    get getReadableStream(): (config: FlashSourceConfig) => Promise<Readable> {
+        return async (config: FlashSourceConfig): Promise<Readable> => {
+            const validation = await this.validate(config);
+            if (!validation.valid) {
+                const errorMsg = `Flash configuration validation failed: ${validation.error}`;
+                this.logger.error(errorMsg);
+                const errorStream = new Readable({
+                    read() {
+                        this.emit('error', new Error(errorMsg));
+                        this.push(null);
+                    },
+                });
+                return errorStream;
+            }
+
+            // Streaming mode currently skips git-history preparation (gitRepoInitialized = false).
+            const { command, args } = this.generateStreamCommand(config, false);
+
+            this.logger.log(
+                `[getReadableStream] Streaming flash backup with command: ${command} ${args.join(' ')}`
+            );
+
+            try {
+                const tarProcess = execa(command, args, {
+                    cwd: config.flashPath,
+                });
+
+                tarProcess.catch((error) => {
+                    this.logger.error(
+                        `Error executing tar command for streaming: ${error.message}`,
+                        error.stack
+                    );
+                });
+
+                if (!tarProcess.stdout) {
+                    throw new Error('Failed to get stdout stream from tar process.');
+                }
+
+                tarProcess.stdout.on('end', () => {
+                    this.logger.log('[getReadableStream] Tar process stdout stream ended.');
+                });
+                tarProcess.stdout.on('error', (err) => {
+                    this.logger.error(
+                        `[getReadableStream] Tar process stdout stream error: ${err.message}`
+                    );
+                });
+
+                return tarProcess.stdout;
+            } catch (error) {
+                const errorMessage = error instanceof Error ? error.message : String(error);
+                this.logger.error(`[getReadableStream] Failed to start tar process: ${errorMessage}`);
+                const errorStream = new Readable({
+                    read() {
+                        this.emit('error', new Error(errorMessage));
+                        this.push(null);
+                    },
+                });
+                return errorStream;
+            }
+        };
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.ts b/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.ts
new file mode 100644
index 0000000000..db6762bf14
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.ts
@@ -0,0 +1,43 @@
+import { Field, InputType, ObjectType } from '@nestjs/graphql';
+
+import { IsBoolean, IsNotEmpty, IsOptional, IsString } from 'class-validator';
+
+import {
+    BaseSourceConfig,
+    BaseSourceConfigInput,
+} from '@app/unraid-api/graph/resolvers/backup/source/base-source.types.js';
+
+@InputType()
+export class FlashPreprocessConfigInput extends BaseSourceConfigInput {
+    @Field(() => String, { description: 'Flash drive mount path', defaultValue: '/boot' })
+    @IsString()
+    @IsNotEmpty()
+    flashPath!: string;
+
+    @Field(() => Boolean, { description: 'Whether to include git history', defaultValue: true })
+    @IsBoolean()
+    includeGitHistory!: boolean;
+
+    @Field(() => [String], { description: 'Additional paths to include in backup', nullable: true })
+    @IsOptional()
+    additionalPaths?: string[];
+}
+
+@ObjectType()
+export class FlashPreprocessConfig implements BaseSourceConfig {
+    @Field(() => String, { nullable: false })
+    label: string = 'Flash drive backup';
+
+    @Field(() => String)
+    flashPath!: string;
+
+    @Field(() => Boolean)
+    includeGitHistory!: boolean;
+
+    @Field(() => [String], { nullable: true })
+    additionalPaths?: string[];
+
+    static isTypeOf(obj: any): obj is FlashPreprocessConfig {
+        return obj && typeof obj.flashPath === 'string' && typeof obj.includeGitHistory === 'boolean';
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-validation.service.ts b/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-validation.service.ts
new file mode 100644
index 0000000000..1f260d8ed4
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/flash/flash-validation.service.ts
@@ -0,0 +1,260 @@
+import { Injectable, Logger } from '@nestjs/common';
+import { access, constants, readdir, stat } from 'fs/promises';
+import { join } from 'path';
+
+import { execa } from 'execa';
+
+import { FlashPreprocessConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/flash/flash-source.types.js';
+
+export interface FlashValidationResult {
+    isValid: boolean;
+    errors: string[];
+    warnings: string[];
+    metadata: {
+        flashPathExists?: boolean;
+        flashPathMounted?: boolean;
+        gitRepoExists?: boolean;
+        gitRepoSize?: number | null;
+        additionalPathsValid?: boolean[];
+        totalSize?: number | null;
+        availableSpace?: number | null;
+    };
+}
+
+@Injectable()
+export class FlashValidationService {
+    private readonly logger = new Logger(FlashValidationService.name);
+
+    async validateFlashConfig(config: FlashPreprocessConfigInput): Promise<FlashValidationResult> {
+        const result: FlashValidationResult = {
+            isValid: true,
+            errors: [],
+            warnings: [],
+            metadata: {},
+        };
+
+        try {
+            // Validate flash path exists and is accessible
+            const flashPathValid = await this.validateFlashPath(config.flashPath);
+            result.metadata.flashPathExists = flashPathValid;
+
+            if (!flashPathValid) {
+                result.errors.push(
+                    `Flash path '${config.flashPath}' does not exist or is not accessible`
+                );
+                result.isValid = false;
+                return result;
+            }
+
+            // Check if flash path is mounted
+            const isMounted = await this.isFlashMounted(config.flashPath);
+            result.metadata.flashPathMounted = isMounted;
+
+            if (!isMounted) {
+                result.warnings.push(`Flash path '${config.flashPath}' may not be properly mounted`);
+            }
+
+            // Validate git repository if includeGitHistory is enabled
+            if (config.includeGitHistory) {
+                const gitRepoExists = await this.validateGitRepository(config.flashPath);
+                result.metadata.gitRepoExists = gitRepoExists;
+
+                if (!gitRepoExists) {
+                    result.warnings.push(
+                        `Git repository not found in '${config.flashPath}'. Git history will be skipped.`
+                    );
+                } else {
+                    const gitRepoSize = await this.getGitRepositorySize(config.flashPath);
+                    result.metadata.gitRepoSize = gitRepoSize;
+
+                    if (gitRepoSize && gitRepoSize > 100 * 1024 * 1024) {
+                        // 100MB
+                        result.warnings.push(
+                            `Git repository is large (${Math.round(gitRepoSize / 1024 / 1024)}MB). Backup may take longer.`
+                        );
+                    }
+                }
+            }
+
+            // Validate additional paths
+            if (config.additionalPaths && config.additionalPaths.length > 0) {
+                const pathValidations = await Promise.all(
+                    config.additionalPaths.map((path) => this.validateAdditionalPath(path))
+                );
+                result.metadata.additionalPathsValid = pathValidations;
+
+                const invalidPaths = config.additionalPaths.filter(
+                    (_, index) => !pathValidations[index]
+                );
+                if (invalidPaths.length > 0) {
+                    result.warnings.push(
+                        `Some additional paths are not accessible: ${invalidPaths.join(', ')}`
+                    );
+                }
+            }
+
+            // Calculate total backup size
+            const totalSize = await this.calculateTotalBackupSize(config);
+            result.metadata.totalSize = totalSize;
+
+            // Check available space
+            const availableSpace = await this.getAvailableSpace(config.flashPath);
+            result.metadata.availableSpace = availableSpace;
+
+            if (totalSize && availableSpace && totalSize > availableSpace * 0.8) {
+                result.warnings.push(
+                    'Backup size may be close to available space. Monitor disk usage during backup.'
+                );
+            }
+        } catch (error: unknown) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            result.errors.push(`Validation failed: ${errorMessage}`);
+            result.isValid = false;
+        }
+
+        return result;
+    }
+
+    async validateFlashPath(flashPath: string): Promise<boolean> {
+        try {
+            await access(flashPath);
+            const stats = await stat(flashPath);
+            return stats.isDirectory();
+        } catch {
+            return false;
+        }
+    }
+
+    async isFlashMounted(flashPath: string): Promise<boolean> {
+        try {
+            // Check if the path is a mount point by comparing device IDs
+            const pathStat = await stat(flashPath);
+            const parentStat = await stat(join(flashPath, '..'));
+            return pathStat.dev !== parentStat.dev;
+        } catch {
+            return false;
+        }
+    }
+
+    async validateGitRepository(flashPath: string): Promise<boolean> {
+        const gitPath = join(flashPath, '.git');
+        try {
+            await access(gitPath);
+            const stats = await stat(gitPath);
+            return stats.isDirectory();
+        } catch {
+            return false;
+        }
+    }
+
+    async getGitRepositorySize(flashPath: string): Promise<number | null> {
+        const gitPath = join(flashPath, '.git');
+        try {
+            const { stdout } = await execa('du', ['-sb', gitPath]);
+            const size = parseInt(stdout.split('\t')[0], 10);
+            return isNaN(size) ? null : size;
+        } catch {
+            return null;
+        }
+    }
+
+    async validateAdditionalPath(path: string): Promise<boolean> {
+        try {
+            await access(path);
+            return true;
+        } catch {
+            return false;
+        }
+    }
+
+    async calculateTotalBackupSize(config: FlashPreprocessConfigInput): Promise<number | null> {
+        try {
+            let totalSize = 0;
+
+            // Get flash directory size
+            const { stdout: flashSize } = await execa('du', ['-sb', config.flashPath]);
+            totalSize += parseInt(flashSize.split('\t')[0], 10) || 0;
+
+            // Add additional paths if specified
+            if (config.additionalPaths) {
+                for (const path of config.additionalPaths) {
+                    try {
+                        const { stdout: pathSize } = await execa('du', ['-sb', path]);
+                        totalSize += parseInt(pathSize.split('\t')[0], 10) || 0;
+                    } catch (error: unknown) {
+                        this.logger.warn(
+                            `Failed to get size for additional path ${path}: ${error instanceof Error ? error.message : String(error)}`
+                        );
+                    }
+                }
+            }
+
+            return totalSize;
+        } catch {
+            return null;
+        }
+    }
+
+    async getAvailableSpace(path: string): Promise<number | null> {
+        try {
+            const { stdout } = await execa('df', ['-B1', path]);
+            const lines = stdout.split('\n');
+            if (lines.length > 1) {
+                const fields = lines[1].split(/\s+/);
+                if (fields.length >= 4) {
+                    const available = parseInt(fields[3], 10);
+                    return isNaN(available) ? null : available;
+                }
+            }
+            return null;
+        } catch {
+            return null;
+        }
+    }
+
+    async checkGitStatus(flashPath: string): Promise<{
+        hasUncommittedChanges: boolean;
+        currentBranch: string | null;
+        lastCommitHash: string | null;
+    }> {
+        const result = {
+            hasUncommittedChanges: false,
+            currentBranch: null as string | null,
+            lastCommitHash: null as string | null,
+        };
+
+        try {
+            // Check for uncommitted changes
+            const { stdout: statusOutput } = await execa('git', ['status', '--porcelain'], {
+                cwd: flashPath,
+            });
+            result.hasUncommittedChanges = statusOutput.trim().length > 0;
+
+            // Get current branch
+            try {
+                const { stdout: branchOutput } = await execa(
+                    'git',
+                    ['rev-parse', '--abbrev-ref', 'HEAD'],
+                    { cwd: flashPath }
+                );
+                result.currentBranch = branchOutput.trim();
+            } catch {
+                // Ignore branch detection errors
+            }
+
+            // Get last commit hash
+            try {
+                const { stdout: commitOutput } = await execa('git', ['rev-parse', 'HEAD'], {
+                    cwd: flashPath,
+                });
+                result.lastCommitHash = commitOutput.trim();
+            } catch {
+                // Ignore commit hash detection errors
+            }
+        } catch {
+            // Git commands failed, repository might not be initialized
+        }
+
+        return result;
+    }
+}
diff --git a/api/src/unraid-api/graph/resolvers/backup/source/raw/raw-source-processor.service.ts b/api/src/unraid-api/graph/resolvers/backup/source/raw/raw-source-processor.service.ts
new file mode 100644
index 0000000000..86288d4c6d
--- /dev/null
+++ b/api/src/unraid-api/graph/resolvers/backup/source/raw/raw-source-processor.service.ts
@@ -0,0 +1,144 @@
+import { Injectable, Logger } from '@nestjs/common';
+import { access, constants, stat } from 'fs/promises';
+import { join } from 'path';
+
+import {
+    BackupSourceConfig,
+    BackupSourceProcessor,
+    BackupSourceProcessorOptions,
+    BackupSourceResult,
+} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js';
+import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js';
+import { RawBackupConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/raw/raw-source.types.js';
+
+export interface RawSourceConfig extends BackupSourceConfig {
+    sourcePath: string;
+    excludePatterns?: string[];
+    includePatterns?: string[];
+}
+
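+// Minimal config this processor accepts (illustrative values only): RAW performs no
+// preprocessing, so execute() validates the path and hands it back as outputPath.
+//
+//   const config: RawSourceConfig = {
+//       sourcePath: '/mnt/user/data',
+//       excludePatterns: ['*.tmp'],
+//       timeout: 3600,
+//       cleanupOnFailure: true,
+//   };
+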
+@Injectable()
+export class RawSourceProcessor extends BackupSourceProcessor<RawSourceConfig> {
+    readonly sourceType = SourceType.RAW;
+    private readonly logger = new Logger(RawSourceProcessor.name);
+
+    get supportsStreaming(): boolean {
+        return false;
+    }
+
+    async execute(
+        config: RawSourceConfig,
+        options?: BackupSourceProcessorOptions
+    ): Promise<BackupSourceResult> {
+        const startTime = Date.now();
+
+        try {
+            this.logger.log(`Starting RAW backup validation for path: ${config.sourcePath}`);
+
+            const validation = await this.validate(config);
+            if (!validation.valid) {
+                return {
+                    success: false,
+                    error: validation.error || 'Validation failed',
+                    metadata: {
+                        validationError: validation.error,
+                        supportsStreaming: this.supportsStreaming,
+                    },
+                    supportsStreaming: this.supportsStreaming,
+                };
+            }
+
+            if (validation.warnings?.length) {
+                this.logger.warn(
+                    `RAW backup warnings for ${config.sourcePath}: ${validation.warnings.join(', ')}`
+                );
+            }
+
+            const sourceStats = await stat(config.sourcePath);
+            const duration = Date.now() - startTime;
+
+            this.logger.log(`RAW backup: Providing direct path for ${config.sourcePath}`);
+            return {
+                success: true,
+                outputPath: config.sourcePath,
+                supportsStreaming: this.supportsStreaming,
+                isStreamingMode: false,
+                metadata: {
+                    sourcePath: config.sourcePath,
+                    isDirectory: sourceStats.isDirectory(),
+                    size: sourceStats.size,
+                    duration,
+                    excludePatterns: config.excludePatterns,
+                    includePatterns: config.includePatterns,
+                    validationWarnings: validation.warnings,
+                    supportsStreaming: this.supportsStreaming,
+                },
+            };
+        } catch (error) {
+            const errorMessage = error instanceof Error ? error.message : String(error);
+            const errorStack = error instanceof Error ? error.stack : undefined;
+            this.logger.error(
+                `RAW backup preparation failed for ${config.sourcePath}: ${errorMessage}`,
+                errorStack
+            );
+
+            return {
+                success: false,
+                error: errorMessage,
+                supportsStreaming: this.supportsStreaming,
+                metadata: {
+                    sourcePath: config.sourcePath,
+                    duration: Date.now() - startTime,
+                    supportsStreaming: this.supportsStreaming,
+                },
+            };
+        }
+    }
+
+    async validate(
+        config: RawSourceConfig
+    ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> {
+        const warnings: string[] = [];
+
+        try {
+            await access(config.sourcePath, constants.F_OK | constants.R_OK);
+        } catch {
+            return {
+                valid: false,
+                error: `Source path does not exist or is not readable: ${config.sourcePath}`,
+            };
+        }
+
+        const restrictedPaths = ['/proc', '/sys', '/dev'];
+        const isRestricted = restrictedPaths.some((path) => config.sourcePath.startsWith(path));
+        if (isRestricted) {
+            return {
+                valid: false,
+                error: `Cannot backup restricted system paths: ${config.sourcePath}`,
+            };
+        }
+
+        if (config.excludePatterns?.length && config.includePatterns?.length) {
+            warnings.push(
+                'Both include and exclude patterns specified - exclude patterns take precedence'
+            );
+        }
+
+        const stats = await stat(config.sourcePath);
+        if (stats.isDirectory()) {
+            const largeDirPaths = ['/mnt/user', '/mnt/disk'];
+            const isLargeDir = largeDirPaths.some((path) => config.sourcePath.startsWith(path));
+            if (isLargeDir && !config.excludePatterns?.length && !config.includePatterns?.length) {
+                warnings.push(
+                    'Backing up large directory without filters may take significant time and space'
+                );
+            }
+        }
+
+        return { valid: true, warnings };
+    }
+
+    async cleanup(result: BackupSourceResult): Promise<void> {
+        this.logger.log(`RAW backup cleanup completed for: ${result.metadata?.sourcePath}`);
+    }
+}
a/api/src/unraid-api/graph/resolvers/backup/source/raw/raw-source.types.ts b/api/src/unraid-api/graph/resolvers/backup/source/raw/raw-source.types.ts new file mode 100644 index 0000000000..0018884789 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/source/raw/raw-source.types.ts @@ -0,0 +1,45 @@ +import { Field, InputType, ObjectType } from '@nestjs/graphql'; + +import { IsArray, IsNotEmpty, IsOptional, IsString } from 'class-validator'; + +import { + BaseSourceConfig, + BaseSourceConfigInput, +} from '@app/unraid-api/graph/resolvers/backup/source/base-source.types.js'; + +@InputType() +export class RawBackupConfigInput extends BaseSourceConfigInput { + @Field(() => String, { description: 'Source path to backup' }) + @IsString() + @IsNotEmpty() + sourcePath!: string; + + @Field(() => [String], { description: 'File patterns to exclude from backup', nullable: true }) + @IsOptional() + @IsArray() + excludePatterns?: string[]; + + @Field(() => [String], { description: 'File patterns to include in backup', nullable: true }) + @IsOptional() + @IsArray() + includePatterns?: string[]; +} + +@ObjectType() +export class RawBackupConfig implements BaseSourceConfig { + @Field(() => String, { nullable: false }) + label: string = 'Raw file backup'; + + @Field(() => String) + sourcePath!: string; + + @Field(() => [String], { nullable: true }) + excludePatterns?: string[]; + + @Field(() => [String], { nullable: true }) + includePatterns?: string[]; + + static isTypeOf(obj: any): obj is RawBackupConfig { + return obj && typeof obj.sourcePath === 'string'; + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/source/script/script-source-processor.service.ts b/api/src/unraid-api/graph/resolvers/backup/source/script/script-source-processor.service.ts new file mode 100644 index 0000000000..803f30aa38 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/source/script/script-source-processor.service.ts @@ -0,0 +1,252 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { promises as fs } from 'fs'; +import { dirname } from 'path'; + +import { execa } from 'execa'; + +import { + BackupSourceConfig, + BackupSourceProcessor, + BackupSourceProcessorOptions, + BackupSourceResult, +} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js'; +import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js'; + +export interface ScriptSourceConfig extends BackupSourceConfig { + scriptPath: string; + scriptArgs?: string[]; + workingDirectory?: string; + environment?: Record<string, string>; + outputPath: string; +} + +@Injectable() +export class ScriptSourceProcessor extends BackupSourceProcessor { + readonly sourceType = SourceType.SCRIPT; + private readonly logger = new Logger(ScriptSourceProcessor.name); + private readonly tempDir = '/tmp/unraid-script-preprocessing'; + private readonly maxOutputSize = 100 * 1024 * 1024; // 100MB limit + + get supportsStreaming(): boolean { + return false; + } + + async execute( + config: ScriptSourceConfig, + options?: BackupSourceProcessorOptions + ): Promise<BackupSourceResult> { + const startTime = Date.now(); + + const validation = await this.validate(config); + if (!validation.valid) { + return { + success: false, + error: `Script configuration validation failed: ${validation.error}`, + metadata: { validationError: validation.error, validationWarnings: validation.warnings }, + }; + } + + if (validation.warnings?.length) { + this.logger.warn(`Script backup warnings: ${validation.warnings.join(', ')}`); + } + +
try { + await this.ensureTempDirectory(); + + const { command, args } = this.buildCommand(config); + + this.logger.log(`Executing script: ${command} ${args.join(' ')}`); + + await this.runScriptWithTimeout(command, args, config.timeout / 1000); + + const outputSize = await this.getFileSize(config.outputPath); + if (outputSize === 0) { + throw new Error('Script produced no output'); + } + + if (outputSize > this.maxOutputSize) { + throw new Error( + `Script output too large: ${outputSize} bytes (max: ${this.maxOutputSize})` + ); + } + + const duration = Date.now() - startTime; + this.logger.log( + `Script completed successfully in ${duration}ms, output size: ${outputSize} bytes` + ); + + return { + success: true, + outputPath: config.outputPath, + metadata: { + scriptPath: config.scriptPath, + duration, + outputSize, + workingDirectory: config.workingDirectory, + scriptArgs: config.scriptArgs, + validationWarnings: validation.warnings, + }, + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error(`Script backup failed: ${errorMessage}`); + + try { + await fs.unlink(config.outputPath); + } catch { + // Ignore cleanup errors + } + + return { + success: false, + error: errorMessage, + metadata: { + scriptPath: config.scriptPath, + duration: Date.now() - startTime, + workingDirectory: config.workingDirectory, + scriptArgs: config.scriptArgs, + }, + }; + } + } + + async validate( + config: ScriptSourceConfig + ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> { + try { + await fs.access(config.scriptPath, fs.constants.F_OK | fs.constants.X_OK); + + const restrictedPaths = ['/boot', '/mnt/user', '/mnt/disk']; + const isRestricted = restrictedPaths.some((path) => config.scriptPath.startsWith(path)); + + if (isRestricted) { + return { + valid: false, + error: 'Script cannot be located in restricted paths (/boot, /mnt/user, /mnt/disk*)', + }; + } + + if (config.workingDirectory) { + try { + await fs.access(config.workingDirectory, fs.constants.F_OK); + } catch { + return { + valid: false, + error: `Working directory does not exist: ${config.workingDirectory}`, + }; + } + } + + const outputDir = dirname(config.outputPath); + try { + await fs.access(outputDir, fs.constants.F_OK | fs.constants.W_OK); + } catch { + return { + valid: false, + error: `Output directory does not exist or is not writable: ${outputDir}`, + }; + } + + if (config.scriptArgs) { + for (const arg of config.scriptArgs) { + if (arg.length > 1000) { + return { + valid: false, + error: `Script argument too long (max 1000 characters): ${arg.substring(0, 50)}...`, + }; + } + } + } + + return { valid: true }; + } catch { + return { + valid: false, + error: `Script does not exist or is not executable: ${config.scriptPath}`, + }; + } + } + + async cleanup(result: BackupSourceResult): Promise<void> { + if (result.outputPath) { + await this.cleanupFile(result.outputPath); + } + } + + private async ensureTempDirectory(): Promise<void> { + try { + await fs.access(this.tempDir); + } catch { + await fs.mkdir(this.tempDir, { recursive: true, mode: 0o700 }); + } + } + + private buildCommand(config: ScriptSourceConfig): { command: string; args: string[] } { + const command = 'timeout'; + const args = [ + `${config.timeout / 1000}s`, + 'nice', + '-n', + '10', + 'ionice', + '-c', + '3', + 'bash', + '-c', + `cd "${config.workingDirectory || '/tmp'}" && exec "${config.scriptPath}" ${(config.scriptArgs || []).join(' ')}`, + ]; + + return { command, args }; + } + + private async
runScriptWithTimeout( + command: string, + args: string[], + timeoutSeconds: number + ): Promise<void> { + try { + await execa(command, args, { + timeout: timeoutSeconds * 1000, + stdio: ['ignore', 'pipe', 'pipe'], + env: { + ...process.env, + PATH: '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', + }, + uid: 99, // nobody user + gid: 99, // nobody group + }); + } catch (error: any) { + if (error.timedOut) { + throw new Error(`Script timeout after ${timeoutSeconds} seconds`); + } + if (error.signal) { + throw new Error(`Script killed by signal: ${error.signal}`); + } + if (error.exitCode !== undefined && error.exitCode !== 0) { + throw new Error( + `Script exited with code ${error.exitCode}. stderr: ${error.stderr || ''}` + ); + } + throw new Error(`Failed to execute script: ${error.message}`); + } + } + + private async getFileSize(filePath: string): Promise<number> { + try { + const stats = await fs.stat(filePath); + return stats.size; + } catch { + return 0; + } + } + + private async cleanupFile(filePath: string): Promise<void> { + try { + await fs.unlink(filePath); + this.logger.log(`Cleaned up script output file: ${filePath}`); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error(`Failed to cleanup script output ${filePath}: ${errorMessage}`); + } + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/source/script/script-source.types.ts b/api/src/unraid-api/graph/resolvers/backup/source/script/script-source.types.ts new file mode 100644 index 0000000000..50c52d93ca --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/source/script/script-source.types.ts @@ -0,0 +1,63 @@ +import { Field, InputType, ObjectType } from '@nestjs/graphql'; + +import { IsNotEmpty, IsOptional, IsString } from 'class-validator'; +import { GraphQLJSON } from 'graphql-scalars'; + +import { + BaseSourceConfig, + BaseSourceConfigInput, +} from '@app/unraid-api/graph/resolvers/backup/source/base-source.types.js'; + +@InputType() +export class ScriptPreprocessConfigInput extends BaseSourceConfigInput { + @Field(() => String, { description: 'Path to the script file' }) + @IsString() + @IsNotEmpty() + scriptPath!: string; + + @Field(() => [String], { description: 'Arguments to pass to the script', nullable: true }) + @IsOptional() + scriptArgs?: string[]; + + @Field(() => String, { description: 'Working directory for script execution', nullable: true }) + @IsOptional() + @IsString() + workingDirectory?: string; + + @Field(() => GraphQLJSON, { + description: 'Environment variables for script execution', + nullable: true, + }) + @IsOptional() + environment?: Record<string, string>; + + @Field(() => String, { description: 'Output file path where script should write data' }) + @IsString() + @IsNotEmpty() + outputPath!: string; +} + +@ObjectType() +export class ScriptPreprocessConfig implements BaseSourceConfig { + @Field(() => String, { nullable: false }) + label: string = 'Script backup'; + + @Field(() => String) + scriptPath!: string; + + @Field(() => [String], { nullable: true }) + scriptArgs?: string[]; + + @Field(() => String, { nullable: true }) + workingDirectory?: string; + + @Field(() => GraphQLJSON, { nullable: true }) + environment?: Record<string, string>; + + @Field(() => String) + outputPath!: string; + + static isTypeOf(obj: any): obj is ScriptPreprocessConfig { + return obj && typeof obj.scriptPath === 'string' && typeof obj.outputPath === 'string'; + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/source/script/script-validation.service.ts
b/api/src/unraid-api/graph/resolvers/backup/source/script/script-validation.service.ts new file mode 100644 index 0000000000..e68d3f6297 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/source/script/script-validation.service.ts @@ -0,0 +1,285 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { access, constants, stat } from 'fs/promises'; +import { dirname, isAbsolute, resolve } from 'path'; + +import { ScriptPreprocessConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/script/script-source.types.js'; + +export interface ScriptValidationResult { + isValid: boolean; + errors: string[]; + warnings: string[]; + metadata: { + scriptExists?: boolean; + scriptExecutable?: boolean; + workingDirectoryExists?: boolean; + outputDirectoryExists?: boolean; + outputDirectoryWritable?: boolean; + environmentVariablesValid?: boolean; + resolvedScriptPath?: string; + resolvedWorkingDirectory?: string; + resolvedOutputPath?: string; + }; +} + +@Injectable() +export class ScriptValidationService { + private readonly logger = new Logger(ScriptValidationService.name); + + async validateScriptConfig(config: ScriptPreprocessConfigInput): Promise<ScriptValidationResult> { + const result: ScriptValidationResult = { + isValid: true, + errors: [], + warnings: [], + metadata: {}, + }; + + try { + // Resolve and validate script path + const resolvedScriptPath = this.resolveScriptPath( + config.scriptPath, + config.workingDirectory + ); + result.metadata.resolvedScriptPath = resolvedScriptPath; + + const scriptExists = await this.validateScriptExists(resolvedScriptPath); + result.metadata.scriptExists = scriptExists; + + if (!scriptExists) { + result.errors.push(`Script file '${resolvedScriptPath}' does not exist`); + result.isValid = false; + return result; + } + + // Check if script is executable + const scriptExecutable = await this.validateScriptExecutable(resolvedScriptPath); + result.metadata.scriptExecutable = scriptExecutable; + + if (!scriptExecutable) { + result.warnings.push(`Script file '${resolvedScriptPath}' may not be executable`); + } + + // Validate working directory + if (config.workingDirectory) { + const resolvedWorkingDir = resolve(config.workingDirectory); + result.metadata.resolvedWorkingDirectory = resolvedWorkingDir; + + const workingDirExists = await this.validateDirectory(resolvedWorkingDir); + result.metadata.workingDirectoryExists = workingDirExists; + + if (!workingDirExists) { + result.errors.push(`Working directory '${resolvedWorkingDir}' does not exist`); + result.isValid = false; + } + } + + // Validate output path and directory + const resolvedOutputPath = this.resolveOutputPath( + config.outputPath, + config.workingDirectory + ); + result.metadata.resolvedOutputPath = resolvedOutputPath; + + const outputDirectory = dirname(resolvedOutputPath); + const outputDirExists = await this.validateDirectory(outputDirectory); + result.metadata.outputDirectoryExists = outputDirExists; + + if (!outputDirExists) { + result.errors.push(`Output directory '${outputDirectory}' does not exist`); + result.isValid = false; + } else { + // Check if output directory is writable + const outputDirWritable = await this.validateDirectoryWritable(outputDirectory); + result.metadata.outputDirectoryWritable = outputDirWritable; + + if (!outputDirWritable) { + result.errors.push(`Output directory '${outputDirectory}' is not writable`); + result.isValid = false; + } + } + + // Validate environment variables + if (config.environment) { + const envValid =
this.validateEnvironmentVariables(config.environment); + result.metadata.environmentVariablesValid = envValid; + + if (!envValid) { + result.warnings.push('Some environment variables may contain invalid values'); + } + } + + // Security validations + this.performSecurityValidations(config, result); + } catch (error: unknown) { + const errorMessage = error instanceof Error ? error.message : String(error); + result.errors.push(`Validation failed: ${errorMessage}`); + result.isValid = false; + } + + return result; + } + + private resolveScriptPath(scriptPath: string, workingDirectory?: string): string { + if (isAbsolute(scriptPath)) { + return scriptPath; + } + + const baseDir = workingDirectory || process.cwd(); + return resolve(baseDir, scriptPath); + } + + private resolveOutputPath(outputPath: string, workingDirectory?: string): string { + if (isAbsolute(outputPath)) { + return outputPath; + } + + const baseDir = workingDirectory || process.cwd(); + return resolve(baseDir, outputPath); + } + + async validateScriptExists(scriptPath: string): Promise<boolean> { + try { + await access(scriptPath); + const stats = await stat(scriptPath); + return stats.isFile(); + } catch { + return false; + } + } + + async validateScriptExecutable(scriptPath: string): Promise<boolean> { + try { + const stats = await stat(scriptPath); + // Check if file has execute permissions (basic check) + return (stats.mode & parseInt('111', 8)) !== 0; + } catch { + return false; + } + } + + async validateDirectory(dirPath: string): Promise<boolean> { + try { + await access(dirPath); + const stats = await stat(dirPath); + return stats.isDirectory(); + } catch { + return false; + } + } + + async validateDirectoryWritable(dirPath: string): Promise<boolean> { + try { + const stats = await stat(dirPath); + // Check if directory has write permissions (basic check) + return (stats.mode & parseInt('200', 8)) !== 0; + } catch { + return false; + } + } + + validateEnvironmentVariables(environment: Record<string, string>): boolean { + try { + // Check for potentially dangerous environment variables + const dangerousVars = ['PATH', 'LD_LIBRARY_PATH', 'HOME', 'USER']; + const hasDangerousVars = Object.keys(environment).some((key) => + dangerousVars.includes(key.toUpperCase()) + ); + + if (hasDangerousVars) { + this.logger.warn('Script environment contains potentially dangerous variables'); + } + + // Check for valid variable names (basic validation) + const validVarName = /^[A-Za-z_][A-Za-z0-9_]*$/; + const invalidVars = Object.keys(environment).filter((key) => !validVarName.test(key)); + + if (invalidVars.length > 0) { + this.logger.warn(`Invalid environment variable names: ${invalidVars.join(', ')}`); + return false; + } + + return true; + } catch { + return false; + } + } + + private performSecurityValidations( + config: ScriptPreprocessConfigInput, + result: ScriptValidationResult + ): void { + // Check for potentially dangerous script paths + const dangerousPaths = ['/bin', '/usr/bin', '/sbin', '/usr/sbin']; + const scriptInDangerousPath = dangerousPaths.some((path) => + result.metadata.resolvedScriptPath?.startsWith(path) + ); + + if (scriptInDangerousPath) { + result.warnings.push( + 'Script is located in a system directory. Ensure it is safe to execute.'
+ ); + } + + // Check for dangerous script arguments + if (config.scriptArgs) { + const dangerousArgs = config.scriptArgs.filter( + (arg) => + arg.includes('..') || + arg.includes('rm ') || + arg.includes('sudo ') || + arg.includes('su ') + ); + + if (dangerousArgs.length > 0) { + result.warnings.push( + 'Script arguments contain potentially dangerous commands or paths.' + ); + } + } + + // Check if output path is in a safe location + if (result.metadata.resolvedOutputPath) { + const systemPaths = ['/bin', '/usr', '/etc', '/var', '/sys', '/proc']; + const outputInSystemPath = systemPaths.some((path) => + result.metadata.resolvedOutputPath?.startsWith(path) + ); + + if (outputInSystemPath) { + result.errors.push('Output path cannot be in system directories for security reasons.'); + result.isValid = false; + } + } + + // Validate script file extension for common script types + if (result.metadata.resolvedScriptPath) { + const scriptExt = result.metadata.resolvedScriptPath.split('.').pop()?.toLowerCase(); + const allowedExtensions = ['sh', 'bash', 'py', 'pl', 'rb', 'js', 'php']; + + if (scriptExt && !allowedExtensions.includes(scriptExt)) { + result.warnings.push( + `Script extension '.${scriptExt}' is not commonly recognized. Ensure it is executable.` + ); + } + } + } + + async getScriptInfo(scriptPath: string): Promise<{ + size: number | null; + lastModified: Date | null; + permissions: string | null; + }> { + try { + const stats = await stat(scriptPath); + return { + size: stats.size, + lastModified: stats.mtime, + permissions: '0' + (stats.mode & parseInt('777', 8)).toString(8), + }; + } catch { + return { + size: null, + lastModified: null, + permissions: null, + }; + } + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-source-processor.service.ts b/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-source-processor.service.ts new file mode 100644 index 0000000000..c3e0a80858 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-source-processor.service.ts @@ -0,0 +1,139 @@ +import { Injectable, Logger } from '@nestjs/common'; + +import { execa } from 'execa'; + +import { + BackupSourceConfig, + BackupSourceProcessor, + BackupSourceProcessorOptions, + BackupSourceResult, +} from '@app/unraid-api/graph/resolvers/backup/source/backup-source-processor.interface.js'; +import { SourceType } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.types.js'; +import { ZfsPreprocessConfig } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.js'; +import { ZfsValidationService } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-validation.service.js'; + +export interface ZfsSourceConfig extends BackupSourceConfig { + poolName: string; + datasetName: string; + snapshotPrefix?: string; + cleanupSnapshots: boolean; + retainSnapshots?: number; +} + +@Injectable() +export class ZfsSourceProcessor extends BackupSourceProcessor { + readonly sourceType = SourceType.ZFS; + private readonly logger = new Logger(ZfsSourceProcessor.name); + + constructor(private readonly zfsValidationService: ZfsValidationService) { + super(); + } + + get supportsStreaming(): boolean { + return true; + } + + async validate( + config: ZfsSourceConfig + ): Promise<{ valid: boolean; error?: string; warnings?: string[] }> { + try { + const result = await this.zfsValidationService.validateZfsConfig(config as any); + return { + valid: result.isValid, + error: result.errors.length > 0 ? 
result.errors.join(', ') : undefined, + warnings: result.warnings, + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return { valid: false, error: errorMessage }; + } + } + + async execute( + config: ZfsSourceConfig, + options?: BackupSourceProcessorOptions + ): Promise<BackupSourceResult> { + try { + this.logger.log(`Starting ZFS backup for dataset: ${config.poolName}/${config.datasetName}`); + + const validation = await this.validate(config); + if (!validation.valid) { + return { + success: false, + error: validation.error || 'ZFS validation failed', + cleanupRequired: false, + }; + } + + const snapshotName = await this.createSnapshot(config); + const snapshotPath = `${config.poolName}/${config.datasetName}@${snapshotName}`; + + this.logger.log(`Created ZFS snapshot: ${snapshotPath}`); + + const result: BackupSourceResult = { + success: true, + outputPath: snapshotPath, + snapshotName, + cleanupRequired: config.cleanupSnapshots, + metadata: { + poolName: config.poolName, + datasetName: config.datasetName, + snapshotPath, + }, + }; + + return result; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + this.logger.error(`ZFS backup failed: ${errorMessage}`, error); + + return { + success: false, + error: errorMessage, + cleanupRequired: false, + }; + } + } + + async cleanup(result: BackupSourceResult): Promise<void> { + if (!result.cleanupRequired || !result.snapshotName) { + return; + } + + try { + const snapshotPath = (result.metadata?.snapshotPath as string) || result.outputPath; + if (snapshotPath && typeof snapshotPath === 'string') { + await this.destroySnapshot(snapshotPath); + this.logger.log(`Cleaned up ZFS snapshot: ${snapshotPath}`); + } + } catch (error) { + this.logger.error(`Failed to cleanup ZFS snapshot: ${error}`); + } + } + + private async createSnapshot(config: ZfsSourceConfig): Promise<string> { + const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); + const prefix = config.snapshotPrefix || 'backup'; + const snapshotName = `${prefix}-${timestamp}`; + const snapshotPath = `${config.poolName}/${config.datasetName}@${snapshotName}`; + + const { stdout, stderr } = await execa('zfs', ['snapshot', snapshotPath]); + + if (stderr) { + this.logger.warn(`ZFS snapshot creation warning: ${stderr}`); + } + + this.logger.debug(`ZFS snapshot created: ${stdout}`); + return snapshotName; + } + + private async destroySnapshot(snapshotPath: string): Promise<void> { + const { stdout, stderr } = await execa('zfs', ['destroy', snapshotPath]); + + if (stderr) { + this.logger.warn(`ZFS snapshot destruction warning: ${stderr}`); + } + + this.logger.debug(`ZFS snapshot destroyed: ${stdout}`); + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.ts b/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.ts new file mode 100644 index 0000000000..f01a01326c --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.ts @@ -0,0 +1,64 @@ +import { Field, InputType, ObjectType } from '@nestjs/graphql'; + +import { IsBoolean, IsNotEmpty, IsNumber, IsOptional, IsString, Min } from 'class-validator'; + +import { + BaseSourceConfig, + BaseSourceConfigInput, +} from '@app/unraid-api/graph/resolvers/backup/source/base-source.types.js'; + +@InputType() +export class ZfsPreprocessConfigInput extends BaseSourceConfigInput { + @Field(() => String, { description: 'ZFS pool name' }) + @IsString() + @IsNotEmpty() + poolName!: string; + + @Field(() =>
String, { description: 'Dataset name within the pool' }) + @IsString() + @IsNotEmpty() + datasetName!: string; + + @Field(() => String, { description: 'Snapshot name prefix', nullable: true }) + @IsOptional() + @IsString() + snapshotPrefix?: string; + + @Field(() => Boolean, { + description: 'Whether to cleanup snapshots after backup', + defaultValue: true, + }) + @IsBoolean() + cleanupSnapshots!: boolean; + + @Field(() => Number, { description: 'Number of snapshots to retain', nullable: true }) + @IsOptional() + @IsNumber() + @Min(1) + retainSnapshots?: number; +} + +@ObjectType() +export class ZfsPreprocessConfig implements BaseSourceConfig { + @Field(() => String, { nullable: false }) + label: string = 'ZFS backup'; + + @Field(() => String) + poolName!: string; + + @Field(() => String) + datasetName!: string; + + @Field(() => String, { nullable: true }) + snapshotPrefix?: string; + + @Field(() => Boolean) + cleanupSnapshots!: boolean; + + @Field(() => Number, { nullable: true }) + retainSnapshots?: number; + + static isTypeOf(obj: any): obj is ZfsPreprocessConfig { + return obj && typeof obj.poolName === 'string' && typeof obj.datasetName === 'string'; + } +} diff --git a/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-validation.service.ts b/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-validation.service.ts new file mode 100644 index 0000000000..6bee2aaf65 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/backup/source/zfs/zfs-validation.service.ts @@ -0,0 +1,245 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { access, constants } from 'fs/promises'; + +import { execa } from 'execa'; + +import { ZfsPreprocessConfigInput } from '@app/unraid-api/graph/resolvers/backup/source/zfs/zfs-source.types.js'; + +export interface ZfsValidationResult { + isValid: boolean; + errors: string[]; + warnings: string[]; + metadata: { + poolExists?: boolean; + datasetExists?: boolean; + datasetSize?: number; + availableSpace?: number; + mountpoint?: string; + }; +} + +@Injectable() +export class ZfsValidationService { + private readonly logger = new Logger(ZfsValidationService.name); + + async validateZfsConfig(config: ZfsPreprocessConfigInput): Promise<ZfsValidationResult> { + const result: ZfsValidationResult = { + isValid: true, + errors: [], + warnings: [], + metadata: {}, + }; + + try { + // Validate pool exists + const poolExists = await this.validatePool(config.poolName); + result.metadata.poolExists = poolExists; + + if (!poolExists) { + result.errors.push(`ZFS pool '${config.poolName}' does not exist`); + result.isValid = false; + return result; + } + + // Validate dataset exists + const datasetExists = await this.validateDataset(config.poolName, config.datasetName); + result.metadata.datasetExists = datasetExists; + + if (!datasetExists) { + result.errors.push( + `ZFS dataset '${config.poolName}/${config.datasetName}' does not exist` + ); + result.isValid = false; + return result; + } + + // Get dataset information + const datasetInfo = await this.getDatasetInfo(config.poolName, config.datasetName); + result.metadata = { ...result.metadata, ...datasetInfo }; + + // Validate dataset is mounted + if (!datasetInfo.mountpoint || datasetInfo.mountpoint === 'none') { + result.warnings.push( + `Dataset '${config.poolName}/${config.datasetName}' is not mounted` + ); + } + + // Check available space for snapshots + if (datasetInfo.availableSpace && datasetInfo.datasetSize) { + const spaceRatio = datasetInfo.availableSpace / datasetInfo.datasetSize; + if (spaceRatio < 0.1) { +
result.warnings.push( + 'Low available space for snapshot creation (less than 10% of dataset size)' + ); + } + } + + // Validate snapshot retention settings + if (config.retainSnapshots && config.retainSnapshots < 1) { + result.errors.push('Retain snapshots must be at least 1'); + result.isValid = false; + } + + // Check for existing snapshots if cleanup is disabled + if (!config.cleanupSnapshots) { + const existingSnapshots = await this.getExistingSnapshots( + config.poolName, + config.datasetName, + config.snapshotPrefix + ); + if (existingSnapshots.length > 10) { + result.warnings.push( + `Found ${existingSnapshots.length} existing snapshots. Consider enabling cleanup.` + ); + } + } + } catch (error: unknown) { + const errorMessage = error instanceof Error ? error.message : String(error); + result.errors.push(`Validation failed: ${errorMessage}`); + result.isValid = false; + } + + return result; + } + + async validatePool(poolName: string): Promise<boolean> { + try { + await execa('zpool', ['list', '-H', '-o', 'name', poolName]); + return true; + } catch { + return false; + } + } + + async validateDataset(poolName: string, datasetName: string): Promise<boolean> { + const fullPath = `${poolName}/${datasetName}`; + try { + await execa('zfs', ['list', '-H', '-o', 'name', fullPath]); + return true; + } catch { + return false; + } + } + + async getDatasetInfo( + poolName: string, + datasetName: string + ): Promise<{ + datasetSize?: number; + availableSpace?: number; + mountpoint?: string; + }> { + const fullPath = `${poolName}/${datasetName}`; + const result: { datasetSize?: number; availableSpace?: number; mountpoint?: string } = {}; + + try { + // Get dataset size + const { stdout: sizeOutput } = await execa('zfs', [ + 'list', + '-H', + '-p', + '-o', + 'used', + fullPath, + ]); + const size = parseInt(sizeOutput.trim(), 10); + if (!isNaN(size)) { + result.datasetSize = size; + } + } catch (error: unknown) { + this.logger.warn( + `Failed to get dataset size: ${error instanceof Error ? error.message : String(error)}` + ); + } + + try { + // Get available space + const { stdout: availOutput } = await execa('zfs', [ + 'list', + '-H', + '-p', + '-o', + 'avail', + fullPath, + ]); + const avail = parseInt(availOutput.trim(), 10); + if (!isNaN(avail)) { + result.availableSpace = avail; + } + } catch (error: unknown) { + this.logger.warn( + `Failed to get available space: ${error instanceof Error ? error.message : String(error)}` + ); + } + + try { + // Get mountpoint + const { stdout: mountOutput } = await execa('zfs', [ + 'list', + '-H', + '-o', + 'mountpoint', + fullPath, + ]); + result.mountpoint = mountOutput.trim(); + } catch (error: unknown) { + this.logger.warn( + `Failed to get mountpoint: ${error instanceof Error ?
error.message : String(error)}` + ); + } + + return result; + } + + async getExistingSnapshots( + poolName: string, + datasetName: string, + prefix?: string + ): Promise<string[]> { + const fullPath = `${poolName}/${datasetName}`; + + try { + const { stdout } = await execa('zfs', [ + 'list', + '-H', + '-t', + 'snapshot', + '-o', + 'name', + '-r', + fullPath, + ]); + const snapshots = stdout.split('\n').filter((line) => line.trim()); + + if (prefix) { + const prefixPattern = `${fullPath}@${prefix}`; + return snapshots.filter((snapshot) => snapshot.startsWith(prefixPattern)); + } + + return snapshots.filter((snapshot) => snapshot.startsWith(`${fullPath}@`)); + } catch { + return []; + } + } + + async getPoolHealth(poolName: string): Promise<string | null> { + try { + const { stdout } = await execa('zpool', ['list', '-H', '-o', 'health', poolName]); + return stdout.trim(); + } catch { + return null; + } + } + + async canCreateSnapshot(poolName: string, datasetName: string): Promise<boolean> { + // Check if we have write permissions and the dataset is not readonly + const fullPath = `${poolName}/${datasetName}`; + + try { + const { stdout } = await execa('zfs', ['get', '-H', '-o', 'value', 'readonly', fullPath]); + return stdout.trim() === 'off'; + } catch { + return false; + } + } +} diff --git a/api/src/unraid-api/graph/resolvers/mutation/mutation.model.ts b/api/src/unraid-api/graph/resolvers/mutation/mutation.model.ts index 73dad03e19..db3b54e727 100644 --- a/api/src/unraid-api/graph/resolvers/mutation/mutation.model.ts +++ b/api/src/unraid-api/graph/resolvers/mutation/mutation.model.ts @@ -19,6 +19,11 @@ export class DockerMutations {} @ObjectType() export class VmMutations {} +@ObjectType({ + description: 'Backup related mutations', +}) +export class BackupMutations {} + @ObjectType({ description: 'API Key related mutations', }) @@ -51,6 +56,9 @@ export class RootMutations { @Field(() => VmMutations, { description: 'VM related mutations' }) vm: VmMutations = new VmMutations(); + @Field(() => BackupMutations, { description: 'Backup related mutations' }) + backup: BackupMutations = new BackupMutations(); + @Field(() => ApiKeyMutations, { description: 'API Key related mutations' }) apiKey: ApiKeyMutations = new ApiKeyMutations(); diff --git a/api/src/unraid-api/graph/resolvers/mutation/mutation.resolver.ts b/api/src/unraid-api/graph/resolvers/mutation/mutation.resolver.ts index 42a9cb126a..3bd6b69d3a 100644 --- a/api/src/unraid-api/graph/resolvers/mutation/mutation.resolver.ts +++ b/api/src/unraid-api/graph/resolvers/mutation/mutation.resolver.ts @@ -3,6 +3,7 @@ import { Mutation, Resolver } from '@nestjs/graphql'; import { ApiKeyMutations, ArrayMutations, + BackupMutations, DockerMutations, ParityCheckMutations, RCloneMutations, @@ -27,6 +28,11 @@ export class RootMutationsResolver { return new VmMutations(); } + @Mutation(() => BackupMutations, { name: 'backup' }) + backup(): BackupMutations { + return new BackupMutations(); + } + @Mutation(() => ParityCheckMutations, { name: 'parityCheck' }) parityCheck(): ParityCheckMutations { return new ParityCheckMutations(); diff --git a/api/src/unraid-api/graph/resolvers/rclone/Remote Control _ API.html b/api/src/unraid-api/graph/resolvers/rclone/Remote Control _ API.html new file mode 100644 index 0000000000..b966b3182b --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/rclone/Remote Control _ API.html @@ -0,0 +1,2626 @@ + Remote Control / API
Remote controlling rclone with its API

+

If rclone is run with the --rc flag then it starts an HTTP server +which can be used to remote control rclone using its API.

+

You can either use the rc command to access the API +or use HTTP directly.

+

If you just want to run a remote control then see the rcd command.
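Every rc method is also exposed over plain HTTP as a POST request with a JSON body, so any language with an HTTP client can drive it. A minimal TypeScript sketch of such a client, assuming rclone was started with --rc and with --rc-user/--rc-pass set to the placeholder credentials below:

// Minimal rc client: each call is a POST of a JSON object to http://host:port/<method>.
// Assumes: rclone --rc --rc-user=admin --rc-pass=secret (credentials are placeholders).
async function rcCall(method: string, params: Record<string, unknown> = {}): Promise<unknown> {
    const response = await fetch(`http://localhost:5572/${method}`, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: 'Basic ' + Buffer.from('admin:secret').toString('base64'),
        },
        body: JSON.stringify(params),
    });
    if (!response.ok) {
        throw new Error(`rc call ${method} failed: ${response.status} ${await response.text()}`);
    }
    return response.json();
}

// Usage: echo parameters back, the HTTP equivalent of `rclone rc rc/noop param1=one`.
const echoed = await rcCall('rc/noop', { param1: 'one', param2: 'two' });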

+

Supported parameters

+

--rc

+

Flag to start the HTTP server and listen for remote requests.

+

--rc-addr=IP

+

IPaddress:Port or :Port to bind server to. (default "localhost:5572").

+

--rc-cert=KEY

+

SSL PEM key (concatenation of certificate and CA certificate).

+

--rc-client-ca=PATH

+

Client certificate authority to verify clients with.

+

--rc-htpasswd=PATH

+

htpasswd file - if not provided no authentication is done.

+

--rc-key=PATH

+

TLS PEM private key file.

+

--rc-max-header-bytes=VALUE

+

Maximum size of request header (default 4096).

+

--rc-min-tls-version=VALUE

+

The minimum TLS version that is acceptable. Valid values are "tls1.0", +"tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

+

--rc-user=VALUE

+

User name for authentication.

+

--rc-pass=VALUE

+

Password for authentication.

+

--rc-realm=VALUE

+

Realm for authentication (default "rclone").

+

--rc-server-read-timeout=DURATION

+

Timeout for server reading data (default 1h0m0s).

+

--rc-server-write-timeout=DURATION

+

Timeout for server writing data (default 1h0m0s).

+

--rc-serve

+

Enable the serving of remote objects via the HTTP interface. This +means objects will be accessible at http://127.0.0.1:5572/ by default, +so you can browse to http://127.0.0.1:5572/ or http://127.0.0.1:5572/* +to see a listing of the remotes. Objects may be requested from +remotes using this syntax http://127.0.0.1:5572/[remote:path]/path/to/object

+

Default Off.

+

--rc-serve-no-modtime

+

Set this flag to skip reading the modification time (can speed things up).

+

Default Off.

+

--rc-files /path/to/directory

+

Path to local files to serve on the HTTP server.

+

If this is set then rclone will serve the files in that directory. It +will also open the root in the web browser if specified. This is for +implementing browser based GUIs for rclone functions.

+

If --rc-user or --rc-pass is set then the URL that is opened will +have the authorization in the URL in the http://user:pass@localhost/ +style.

+

Default Off.

+

--rc-enable-metrics

+

Enable OpenMetrics/Prometheus compatible endpoint at /metrics. +If more control over the metrics is desired (for example running it on a different port or with different auth) then endpoint can be enabled with the --metrics-* flags instead.

+

Default Off.

+

--rc-web-gui

+

Set this flag to serve the default web gui on the same port as rclone.

+

Default Off.

+

--rc-allow-origin

+

Set the allowed Access-Control-Allow-Origin for rc requests.

+

Can be used with --rc-web-gui if rclone is running on a different IP than the web-gui.

+

Default is the IP address on which rc is running.

+

--rc-web-fetch-url

+

Set the URL to fetch the rclone-web-gui files from.

+

Default https://api.github.com/repos/rclone/rclone-webui-react/releases/latest.

+

--rc-web-gui-update

+

Set this flag to check and update rclone-webui-react from the rc-web-fetch-url.

+

Default Off.

+

--rc-web-gui-force-update

+

Set this flag to force update rclone-webui-react from the rc-web-fetch-url.

+

Default Off.

+

--rc-web-gui-no-open-browser

+

Set this flag to disable opening browser automatically when using web-gui.

+

Default Off.

+

--rc-job-expire-duration=DURATION

+

Expire finished async jobs older than DURATION (default 60s).

+

--rc-job-expire-interval=DURATION

+

Interval duration to check for expired async jobs (default 10s).

+

--rc-no-auth

+

By default rclone will require authorisation to have been set up on +the rc interface in order to use any methods which access any rclone +remotes. E.g. operations/list is denied as it involves creating a +remote, as does sync/copy.

+

If this is set then no authorisation will be required on the server to +use these methods. The alternative is to use --rc-user and +--rc-pass and use these credentials in the request.

+

Default Off.

+

--rc-baseurl

+

Prefix for URLs.

+

Default is root

+

--rc-template

+

User-specified template.

+

Accessing the remote control via the rclone rc command

+

Rclone itself implements the remote control protocol in its rclone rc command.

+

You can use it like this:

+
$ rclone rc rc/noop param1=one param2=two
+{
+	"param1": "one",
+	"param2": "two"
+}
+

If the remote is running on a different URL than the default +http://localhost:5572/, use the --url option to specify it:

+
$ rclone rc --url http://some.remote:1234/ rc/noop
+

Or, if the remote is listening on a Unix socket, use the --unix-socket option +instead:

+
$ rclone rc --unix-socket /tmp/rclone.sock rc/noop
+

Run rclone rc on its own, without any commands, to see the help for the +installed remote control commands. Note that this also needs to connect to the +remote server.

+

JSON input

+

rclone rc also supports a --json flag which can be used to send +more complicated input parameters.

+
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 } }' rc/noop
+{
+	"p1": [
+		1,
+		"2",
+		null,
+		4
+	],
+	"p2": {
+		"a": 1,
+		"b": 2
+	}
+}
+

If the parameter being passed is an object then it can be passed as a +JSON string rather than using the --json flag which simplifies the +command line.

+
rclone rc operations/list fs=/tmp remote=test opt='{"showHash": true}'
+

Rather than

+
rclone rc operations/list --json '{"fs": "/tmp", "remote": "test", "opt": {"showHash": true}}'
+

Special parameters

+

The rc interface supports some special parameters which apply to +all commands. These start with _ to show they are different.

+

Running asynchronous jobs with _async = true

+

Each rc call is classified as a job and it is assigned its own id. By default +jobs are executed immediately as they are created, i.e. synchronously.

+

If _async has a true value when supplied to an rc call then it will +return immediately with a job id and the task will be run in the +background. The job/status call can be used to get information of +the background job. The job can be queried for up to 1 minute after +it has finished.

+

It is recommended that potentially long running jobs, e.g. sync/sync, +sync/copy, sync/move, operations/purge are run with the _async +flag to avoid any potential problems with the HTTP request and +response timing out.

+

Starting a job with the _async flag:

+
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 }, "_async": true }' rc/noop
+{
+	"jobid": 2
+}
+

Query the status to see if the job has finished. For more information +on the meaning of these return parameters see the job/status call.

+
$ rclone rc --json '{ "jobid":2 }' job/status
+{
+	"duration": 0.000124163,
+	"endTime": "2018-10-27T11:38:07.911245881+01:00",
+	"error": "",
+	"finished": true,
+	"id": 2,
+	"output": {
+		"_async": true,
+		"p1": [
+			1,
+			"2",
+			null,
+			4
+		],
+		"p2": {
+			"a": 1,
+			"b": 2
+		}
+	},
+	"startTime": "2018-10-27T11:38:07.911121728+01:00",
+	"success": true
+}
+

job/list can be used to show the running or recently completed jobs

+
$ rclone rc job/list
+{
+	"jobids": [
+		2
+	]
+}
+
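The same flow works from code: submit the call with _async set, keep the returned jobid, and poll job/status until finished is true. A sketch reusing the hypothetical rcCall helper from earlier:

// Start a long-running copy in the background, then poll its job status.
const { jobid } = (await rcCall('sync/copy', {
    srcFs: '/tmp/src',          // example source (placeholder)
    dstFs: 'remote:backup',     // example destination (placeholder)
    _async: true,
})) as { jobid: number };

let status: { finished: boolean; success: boolean; error: string };
do {
    await new Promise((resolve) => setTimeout(resolve, 1000)); // poll once per second
    status = (await rcCall('job/status', { jobid })) as typeof status;
} while (!status.finished);

if (!status.success) {
    throw new Error(`Job ${jobid} failed: ${status.error}`);
}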

Setting config flags with _config

+

If you wish to set config (the equivalent of the global flags) for the +duration of an rc call only then pass in the _config parameter.

+

This should be in the same format as the config key returned by +options/get.

+

For example, if you wished to run a sync with the --checksum +parameter, you would pass this parameter in your JSON blob.

+
"_config":{"CheckSum": true}
+
+

If using rclone rc this could be passed as

+
rclone rc sync/sync ... _config='{"CheckSum": true}'
+
+

Any config parameters you don't set will inherit the global defaults +which were set with command line flags or environment variables.

+

Note that it is possible to set some values as strings or integers - +see data types for more info. Here is an example +setting the equivalent of --buffer-size in string or integer format.

+
"_config":{"BufferSize": "42M"}
+"_config":{"BufferSize": 44040192}
+
+

If you wish to check the _config assignment has worked properly then +calling options/local will show what the value got set to.

+

Setting filter flags with _filter

+

If you wish to set filters for the duration of an rc call only then +pass in the _filter parameter.

+

This should be in the same format as the filter key returned by +options/get.

+

For example, if you wished to run a sync with these flags

+
--max-size 1M --max-age 42s --include "a" --include "b"
+
+

you would pass this parameter in your JSON blob.

+
"_filter":{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}
+
+

If using rclone rc this could be passed as

+
rclone rc ... _filter='{"MaxSize":"1M", "IncludeRule":["a","b"], "MaxAge":"42s"}'
+
+

Any filter parameters you don't set will inherit the global defaults +which were set with command line flags or environment variables.

+

Note that it is possible to set some values as strings or integers - +see data types for more info. Here is an example +setting the equivalent of --min-size in string or integer format.

+
"_filter":{"MinSize": "42M"}
+"_filter":{"MinSize": 44040192}
+
+

If you wish to check the _filter assignment has worked properly then +calling options/local will show what the value got set to.
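Both overrides compose with any call. A sketch, again using the hypothetical rcCall helper, of a background copy that checks checksums and only considers recent, small files:

// _config and _filter apply to this call only; global defaults are untouched.
await rcCall('sync/copy', {
    srcFs: '/tmp/src',
    dstFs: 'remote:backup',
    _async: true,
    _config: { CheckSum: true, BufferSize: '42M' },
    _filter: { MaxSize: '1M', MaxAge: '42s', IncludeRule: ['a', 'b'] },
});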

+

Assigning operations to groups with _group = value

+

Each rc call has its own stats group for tracking its metrics. By default +grouping is done by the composite group name formed from the prefix job/ and the id of the +job, like so: job/1.

+

If _group has a value then stats for that request will be grouped under that +value. This allows the caller to group stats under their own name.

+

Stats for a specific group can be accessed by passing group to core/stats:

+
$ rclone rc --json '{ "group": "job/1" }' core/stats
+{
+	"speed": 12345
+	...
+}
+
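This is the natural hook for progress reporting: tag each transfer with its own _group, then poll that group's stats. A sketch with the hypothetical rcCall helper (the stats fields read here are a subset of what core/stats returns):

// Tag the transfer with a caller-chosen group, then poll that group's stats.
const group = 'backup-job/42'; // hypothetical group name
await rcCall('sync/copy', { srcFs: '/tmp/src', dstFs: 'remote:backup', _async: true, _group: group });

const stats = (await rcCall('core/stats', { group })) as {
    bytes: number;      // bytes transferred so far
    totalBytes: number; // total bytes expected
    speed: number;      // bytes per second
};
const percent = stats.totalBytes > 0 ? Math.round((100 * stats.bytes) / stats.totalBytes) : 0;
console.log(`${percent}% complete at ${stats.speed} B/s`);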

Data types

+

When the API returns types, these will mostly be straightforward +integer, string or boolean types.

+

However, some of the types returned by the options/get +call and taken by the options/set calls, as well as by the +vfsOpt, mountOpt and _config parameters, need a little more explanation (they are modelled in the sketch after this list):

+
  • Duration - these are returned as an integer duration in nanoseconds. They may be set as an integer, or they may be set with a time string, eg "5s". See the options section for more info.
  • Size - these are returned as an integer number of bytes. They may be set as an integer or they may be set with a size suffix string, eg "10M". See the options section for more info.
  • Enumerated type (such as CutoffMode, DumpFlags, LogLevel, VfsCacheMode) - these will be returned as an integer and may be set as an integer, but more conveniently they can be set as a string, eg "HARD" for CutoffMode or DEBUG for LogLevel.
  • BandwidthSpec - this will be set and returned as a string, eg "1M".
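In a typed client these dual representations can be modelled directly; a small sketch of the value shapes described in the list above:

// Dual representations accepted for option values, per the list above.
type RcDuration = number | string;   // nanoseconds, or a time string such as "5s"
type RcSize = number | string;       // bytes, or a suffixed string such as "10M"
type RcEnum = number | string;       // integer value, or its name, e.g. "HARD" or "DEBUG"
type RcBandwidthSpec = string;       // always a string, e.g. "1M"

const overrides: { BufferSize: RcSize; CutoffMode: RcEnum } = {
    BufferSize: '42M',   // equivalent to the integer 44040192
    CutoffMode: 'HARD',
};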

Option blocks

+

The calls options/info (for the main config) and +config/providers (for the backend config) may be +used to get information on the rclone configuration options. This can +be used to build user interfaces for displaying and setting any rclone +option.

+

These consist of arrays of Option blocks. These have the following +format. Each block describes a single option.

Field      | Type       | Optional | Description
-----------|------------|----------|------------
Name       | string     | N        | name of the option in snake_case
FieldName  | string     | N        | name of the field used in the rc - if blank use Name
Help       | string     | N        | help, started with a single sentence on a single line
Groups     | string     | Y        | groups this option belongs to - comma separated string for options classification
Provider   | string     | Y        | set to filter on provider
Default    | any        | N        | default value, if set (and not to nil or "") then Required does nothing
Value      | any        | N        | value to be set by flags
Examples   | Examples   | Y        | predefined values that can be selected from list (multiple-choice option)
ShortOpt   | string     | Y        | the short command line option for this
Hide       | Visibility | N        | if non zero, this option is hidden from the configurator or the command line
Required   | bool       | N        | this option is required, meaning value cannot be empty unless there is a default
IsPassword | bool       | N        | set if the option is a password
NoPrefix   | bool       | N        | set if the option for this should not use the backend prefix
Advanced   | bool       | N        | set if this is an advanced config option
Exclusive  | bool       | N        | set if the answer can only be one of the examples (empty string allowed unless Required or Default is set)
Sensitive  | bool       | N        | set if this option should be redacted when using rclone config redacted

An example of this might be the --log-level flag. Note that the +Name of the option becomes the command line flag with _ replaced +with -.

+
{
+    "Advanced": false,
+    "Default": 5,
+    "DefaultStr": "NOTICE",
+    "Examples": [
+        {
+            "Help": "",
+            "Value": "EMERGENCY"
+        },
+        {
+            "Help": "",
+            "Value": "ALERT"
+        },
+        ...
+    ],
+    "Exclusive": true,
+    "FieldName": "LogLevel",
+    "Groups": "Logging",
+    "Help": "Log level DEBUG|INFO|NOTICE|ERROR",
+    "Hide": 0,
+    "IsPassword": false,
+    "Name": "log_level",
+    "NoPrefix": true,
+    "Required": true,
+    "Sensitive": false,
+    "Type": "LogLevel",
+    "Value": null,
+    "ValueStr": "NOTICE"
+},
+

Note that the Help may be multiple lines separated by \n. The +first line will always be a short sentence and this is the sentence +shown when running rclone help flags.

+

Specifying remotes to work on

+

Remotes are specified with the fs=, srcFs=, dstFs= +parameters depending on the command being used.

+

The parameters can be a string as per the rest of rclone, eg +s3:bucket/path or :sftp:/my/dir. They can also be specified as +JSON blobs.

+

If specifying a JSON blob it should be an object mapping strings to +strings. These values will be used to configure the remote. There are +3 special values which may be set:

+
  • type - set to type to specify a remote called :type:
  • _name - set to name to specify a remote called name:
  • _root - sets the root of the remote - may be empty

One of _name or type should normally be set. If the local +backend is desired then type should be set to local. If _root +isn't specified then it defaults to the root of the remote.

+

For example this JSON is equivalent to remote:/tmp

+
{
+    "_name": "remote",
+    "_root": "/tmp"
+}
+

And this is equivalent to :sftp,host='example.com':/tmp

+
{
+    "type": "sftp",
+    "host": "example.com",
+    "_root": "/tmp"
+}
+

And this is equivalent to /tmp/dir

+
{
+    "type": "local",
+    "_root": "/tmp/dir"
+}
+
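The same blobs can be assembled programmatically. A sketch building an fs parameter for an on-the-fly sftp remote (the host value is a placeholder, and the blob is serialised here the same way the command line passes it, as a JSON string):

// Build an fs= value as a JSON blob instead of a config-file remote name.
const fsBlob = {
    type: 'sftp',        // backend type, used since no _name is given
    host: 'example.com', // backend-specific option (placeholder)
    _root: '/tmp',       // root within the remote
};
const listing = await rcCall('operations/list', { fs: JSON.stringify(fsBlob), remote: '' });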

Supported commands

+ +

backend/command: Runs a backend command.

+

This takes the following parameters:

+
  • command - a string with the command name
  • fs - a remote name string e.g. "drive:"
  • arg - a list of arguments for the backend command
  • opt - a map of string to string of options

Returns:

+
  • result - result from the backend command

Example:

+
rclone rc backend/command command=noop fs=. -o echo=yes -o blue -a path1 -a path2
+
+

Returns

+
{
+	"result": {
+		"arg": [
+			"path1",
+			"path2"
+		],
+		"name": "noop",
+		"opt": {
+			"blue": "",
+			"echo": "yes"
+		}
+	}
+}
+

Note that this is the direct equivalent of using this "backend" +command:

+
rclone backend noop . -o echo=yes -o blue path1 path2
+
+

Note that arguments must be preceded by the "-a" flag

+

See the backend command for more information.

+

Authentication is required for this call.

+

cache/expire: Purge a remote from cache

+

Purge a remote from the cache backend. Supports either a directory or a file. +Params:

+
  • remote = path to remote (required)
  • withData = true/false to delete cached data (chunks) as well (optional)

Eg

+
rclone rc cache/expire remote=path/to/sub/folder/
+rclone rc cache/expire remote=/ withData=true
+
+

cache/fetch: Fetch file chunks

+

Ensure the specified file chunks are cached on disk.

+

The chunks= parameter specifies the file chunks to check. +It takes a comma separated list of array slice indices. +The slice indices are similar to Python slices: start[:end]

+

start is the 0 based chunk number from the beginning of the file +to fetch inclusive. end is 0 based chunk number from the beginning +of the file to fetch exclusive. +Both values can be negative, in which case they count from the back +of the file. The value "-5:" represents the last 5 chunks of a file.

+

Some valid examples are: +":5,-5:" -> the first and last five chunks +"0,-2" -> the first and the second last chunk +"0:10" -> the first ten chunks

+

Any parameter with a key that starts with "file" can be used to +specify files to fetch, e.g.

+
rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
+
+

File names will automatically be encrypted when a crypt remote +is used on top of the cache.

+

cache/stats: Get cache stats

+

Show statistics for the cache remote.

+

config/create: create the config for a remote.

+

This takes the following parameters:

+
  • name - name of remote
  • parameters - a map of { "key": "value" } pairs
  • type - type of the new remote
  • opt - a dictionary of options to control the configuration
      • obscure - declare passwords are plain and need obscuring
      • noObscure - declare passwords are already obscured and don't need obscuring
      • nonInteractive - don't interact with a user, return questions
      • continue - continue the config process with an answer
      • all - ask all the config questions not just the post config ones
      • state - state to restart with - used with continue
      • result - result to restart with - used with continue

See the config create command for more information on the above.
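The parameters above map one-to-one onto the JSON body of the call. A sketch creating a remote non-interactively via the hypothetical rcCall helper (the remote name and drive credentials are placeholders):

// Create a remote called "mydrive" of type "drive" without prompting.
await rcCall('config/create', {
    name: 'mydrive',
    type: 'drive',
    parameters: { client_id: 'XXX', client_secret: 'YYY' },
    opt: { nonInteractive: true },
});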

+

Authentication is required for this call.

+

config/delete: Delete a remote in the config file.

+

Parameters:

+
  • name - name of remote to delete

See the config delete command for more information on the above.

+

Authentication is required for this call.

+

config/dump: Dumps the config file.

+

Returns a JSON object:

+
  • key: value

Where keys are remote names and values are the config parameters.

+

See the config dump command for more information on the above.

+

Authentication is required for this call.

+

config/get: Get a remote in the config file.

+

Parameters:

+
  • name - name of remote to get

See the config dump command for more information on the above.

+

Authentication is required for this call.

+

config/listremotes: Lists the remotes in the config file and defined in environment variables.

+

Returns

+
  • remotes - array of remote names

See the listremotes command for more information on the above.

+

Authentication is required for this call.

+

config/password: password the config for a remote.

+

This takes the following parameters:

+
  • name - name of remote
  • parameters - a map of { "key": "value" } pairs

See the config password command for more information on the above.

+

Authentication is required for this call.

+

config/paths: Reads the config file path and other important paths.

+

Returns a JSON object with the following keys:

+
  • config: path to config file
  • cache: path to root of cache directory
  • temp: path to root of temporary directory

Eg

+
{
+    "cache": "/home/USER/.cache/rclone",
+    "config": "/home/USER/.rclone.conf",
+    "temp": "/tmp"
+}
+
+

See the config paths command for more information on the above.

+

Authentication is required for this call.

+

config/providers: Shows how providers are configured in the config file.

+

Returns a JSON object:

+
  • providers - array of objects

See the config providers command +for more information on the above.

+

Note that the Options blocks are in the same format as returned by +"options/info". They are described in the +option blocks section.

+

Authentication is required for this call.

+

config/setpath: Set the path of the config file

+

Parameters:

+
  • path - path to the config file to use

Authentication is required for this call.

+

config/update: update the config for a remote.

+

This takes the following parameters:

+
  • name - name of remote
  • parameters - a map of { "key": "value" } pairs
  • opt - a dictionary of options to control the configuration
      • obscure - declare passwords are plain and need obscuring
      • noObscure - declare passwords are already obscured and don't need obscuring
      • nonInteractive - don't interact with a user, return questions
      • continue - continue the config process with an answer
      • all - ask all the config questions not just the post config ones
      • state - state to restart with - used with continue
      • result - result to restart with - used with continue

See the config update command for more information on the above.

+

Authentication is required for this call.

+

core/bwlimit: Set the bandwidth limit.

+

This sets the bandwidth limit to the string passed in. This should be +a single bandwidth limit entry or a pair of upload:download bandwidth.

+

Eg

+
rclone rc core/bwlimit rate=off
+{
+    "bytesPerSecond": -1,
+    "bytesPerSecondTx": -1,
+    "bytesPerSecondRx": -1,
+    "rate": "off"
+}
+rclone rc core/bwlimit rate=1M
+{
+    "bytesPerSecond": 1048576,
+    "bytesPerSecondTx": 1048576,
+    "bytesPerSecondRx": 1048576,
+    "rate": "1M"
+}
+rclone rc core/bwlimit rate=1M:100k
+{
+    "bytesPerSecond": 1048576,
+    "bytesPerSecondTx": 1048576,
+    "bytesPerSecondRx": 131072,
+    "rate": "1M"
+}
+
+

If the rate parameter is not supplied then the bandwidth is queried

+
rclone rc core/bwlimit
+{
+    "bytesPerSecond": 1048576,
+    "bytesPerSecondTx": 1048576,
+    "bytesPerSecondRx": 1048576,
+    "rate": "1M"
+}
+
+

The format of the parameter is exactly the same as passed to --bwlimit +except only one bandwidth may be specified.

+

In either case "rate" is returned as a human-readable string, and +"bytesPerSecond" is returned as a number.

+

core/command: Run a rclone terminal command over rc.

+

This takes the following parameters:

+
  • command - a string with the command name.
  • arg - a list of arguments for the backend command.
  • opt - a map of string to string of options.
  • returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").
    • Defaults to "COMBINED_OUTPUT" if not set.
    • The STREAM returnTypes will write the output to the body of the HTTP message.
    • The COMBINED_OUTPUT will write the output to the "result" parameter.

Returns:

+
  • result - result from the backend command.
    • Only set when using returnType "COMBINED_OUTPUT".
  • error - set if rclone exits with an error code.
  • returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").

Example:

+
rclone rc core/command command=ls -a mydrive:/ -o max-depth=1
+rclone rc core/command -a ls -a mydrive:/ -o max-depth=1
+
+

Returns:

+
{
+	"error": false,
+	"result": "<Raw command line output>"
+}
+
+OR
+{
+	"error": true,
+	"result": "<Raw command line output>"
+}
+

Authentication is required for this call.

+

core/du: Returns disk usage of a locally attached disk.

+

This returns the disk usage for the local directory passed in as dir.

+

If the directory is not passed in, it defaults to the directory pointed to by --cache-dir.

+
  • dir - string (optional)

Returns:

+
{
+	"dir": "/",
+	"info": {
+		"Available": 361769115648,
+		"Free": 361785892864,
+		"Total": 982141468672
+	}
+}
+

core/gc: Runs a garbage collection.

+

This tells the go runtime to do a garbage collection run. It isn't necessary to call this normally, but it can be useful for debugging memory problems.

+

core/group-list: Returns list of stats.

+

This returns list of stats groups currently in memory.

+

Returns the following values:

+
{
+	"groups":  an array of group names:
+		[
+			"group1",
+			"group2",
+			...
+		]
+}
+

core/memstats: Returns the memory statistics

+

This returns the memory statistics of the running program. What the values mean are explained in the go docs: https://golang.org/pkg/runtime/#MemStats

+

The most interesting values for most people are:

+
  • HeapAlloc - this is the amount of memory rclone is actually using
  • HeapSys - this is the amount of memory rclone has obtained from the OS
  • Sys - this is the total amount of memory requested from the OS
    • It is virtual memory so may include unused memory

core/obscure: Obscures a string passed in.

+

Pass a clear string and rclone will obscure it for the config file:

+
  • clear - string

Returns:

+
  • obscured - string
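A minimal example (the obscured value below is a placeholder, since it differs on every run):

rclone rc core/obscure clear=mysecret
{
    "obscured": "<obscured string>"
}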

core/pid: Return PID of current process

+

This returns the PID of the current process. Useful for stopping the rclone process.

+

core/quit: Terminates the app.

+

(Optional) Pass an exit code to be used for terminating the app:

+
  • exitCode - int
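E.g. to exit with a non-zero status (the code here is arbitrary):

rclone rc core/quit exitCode=1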

core/stats: Returns stats about current transfers.

+

This returns all available stats:

+
rclone rc core/stats
+
+

If group is not provided then summed up stats for all groups will be returned.

+

Parameters

+
  • group - name of the stats group (string)

Returns the following values:

+
{
+	"bytes": total transferred bytes since the start of the group,
+	"checks": number of files checked,
+	"deletes" : number of files deleted,
+	"elapsedTime": time in floating point seconds since rclone was started,
+	"errors": number of errors,
+	"eta": estimated time in seconds until the group completes,
+	"fatalError": boolean whether there has been at least one fatal error,
+	"lastError": last error string,
+	"renames" : number of files renamed,
+	"retryError": boolean showing whether there has been at least one non-NoRetryError,
+        "serverSideCopies": number of server side copies done,
+        "serverSideCopyBytes": number bytes server side copied,
+        "serverSideMoves": number of server side moves done,
+        "serverSideMoveBytes": number bytes server side moved,
+	"speed": average speed in bytes per second since start of the group,
+	"totalBytes": total number of bytes in the group,
+	"totalChecks": total number of checks in the group,
+	"totalTransfers": total number of transfers in the group,
+	"transferTime" : total time spent on running jobs,
+	"transfers": number of transferred files,
+	"transferring": an array of currently active file transfers:
+		[
+			{
+				"bytes": total transferred bytes for this file,
+				"eta": estimated time in seconds until file transfer completion
+				"name": name of the file,
+				"percentage": progress of the file transfer in percent,
+				"speed": average speed over the whole transfer in bytes per second,
+				"speedAvg": current speed in bytes per second as an exponentially weighted moving average,
+				"size": size of the file in bytes
+			}
+		],
+	"checking": an array of names of currently active file checks
+		[]
+}
+

Values for "transferring", "checking" and "lastError" are only assigned if data is available. The value for "eta" is null if an eta cannot be determined.
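To query a single group rather than the summed stats, pass the group parameter. Async jobs typically create groups named job/<jobid>, so an illustrative call is:

rclone rc core/stats group=job/1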

+

core/stats-delete: Delete stats group.

+

This deletes entire stats group.

+

Parameters

+
  • group - name of the stats group (string)

core/stats-reset: Reset stats.

+

This clears counters, errors and finished transfers for all stats or specific stats group if group is provided.

+

Parameters

+
  • group - name of the stats group (string)

core/transferred: Returns stats about completed transfers.

+

This returns stats about completed transfers:

+
rclone rc core/transferred
+
+

If group is not provided then completed transfers for all groups will be returned.

+

Note only the last 100 completed transfers are returned.

+

Parameters

+
  • group - name of the stats group (string)

Returns the following values:

+
{
+	"transferred":  an array of completed transfers (including failed ones):
+		[
+			{
+				"name": name of the file,
+				"size": size of the file in bytes,
+				"bytes": total transferred bytes for this file,
+				"checked": if the transfer is only checked (skipped, deleted),
+				"timestamp": integer representing millisecond unix epoch,
+				"error": string description of the error (empty if successful),
+				"jobid": id of the job that this transfer belongs to
+			}
+		]
+}
+

core/version: Shows the current version of rclone and the go runtime.

+

This returns the current version of rclone and the go runtime:

+
  • version - rclone version, e.g. "v1.53.0"
  • decomposed - version number as [major, minor, patch]
  • isGit - boolean - true if this was compiled from the git version
  • isBeta - boolean - true if this is a beta version
  • os - OS in use as according to Go
  • arch - cpu architecture in use according to Go
  • goVersion - version of Go runtime in use
  • linking - type of rclone executable (static or dynamic)
  • goTags - space separated build tags or "none"

debug/set-block-profile-rate: Set runtime.SetBlockProfileRate for blocking profiling.

+

SetBlockProfileRate controls the fraction of goroutine blocking events that are reported in the blocking profile. The profiler aims to sample an average of one blocking event per rate nanoseconds spent blocked.

+

To include every blocking event in the profile, pass rate = 1. To turn off profiling entirely, pass rate <= 0.

+

After calling this you can use this to see the blocking profile:

+
go tool pprof http://localhost:5572/debug/pprof/block
+
+

Parameters:

+
  • rate - int

debug/set-gc-percent: Call runtime/debug.SetGCPercent for setting the garbage collection target percentage.

+

SetGCPercent sets the garbage collection target percentage: a collection is triggered when the ratio of freshly allocated data to live data remaining after the previous collection reaches this percentage. SetGCPercent returns the previous setting. The initial setting is the value of the GOGC environment variable at startup, or 100 if the variable is not set.

+

This setting may be effectively reduced in order to maintain a memory limit. A negative percentage effectively disables garbage collection, unless the memory limit is reached.

+

See https://pkg.go.dev/runtime/debug#SetMemoryLimit for more details.

+

Parameters:

+
  • gc-percent - int
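For example, to make garbage collection more aggressive (the value is illustrative):

rclone rc debug/set-gc-percent gc-percent=50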

debug/set-mutex-profile-fraction: Set runtime.SetMutexProfileFraction for mutex profiling.

+

SetMutexProfileFraction controls the fraction of mutex contention events that are reported in the mutex profile. On average 1/rate events are reported. The previous rate is returned.

+

To turn off profiling entirely, pass rate 0. To just read the current rate, pass rate < 0. (For n>1 the details of sampling may change.)

+

Once this is set you can use this to profile the mutex contention:

+
go tool pprof http://localhost:5572/debug/pprof/mutex
+
+

Parameters:

+
  • rate - int

Results:

+
  • previousRate - int

debug/set-soft-memory-limit: Call runtime/debug.SetMemoryLimit for setting a soft memory limit for the runtime.

+

SetMemoryLimit provides the runtime with a soft memory limit.

+

The runtime undertakes several processes to try to respect this memory limit, including adjustments to the frequency of garbage collections and returning memory to the underlying system more aggressively. This limit will be respected even if GOGC=off (or, if SetGCPercent(-1) is executed).

+

The input limit is provided as bytes, and includes all memory mapped, managed, and not released by the Go runtime. Notably, it does not account for space used by the Go binary and memory external to Go, such as memory managed by the underlying system on behalf of the process, or memory managed by non-Go code inside the same process. Examples of excluded memory sources include: OS kernel memory held on behalf of the process, memory allocated by C code, and memory mapped by syscall.Mmap (because it is not managed by the Go runtime).

+

A zero limit or a limit that's lower than the amount of memory used by the Go runtime may cause the garbage collector to run nearly continuously. However, the application may still make progress.

+

The memory limit is always respected by the Go runtime, so to effectively disable this behavior, set the limit very high. math.MaxInt64 is the canonical value for disabling the limit, but values much greater than the available memory on the underlying system work just as well.

+

See https://go.dev/doc/gc-guide for a detailed guide explaining the soft memory limit in more detail, as well as a variety of common use-cases and scenarios.

+

SetMemoryLimit returns the previously set memory limit. A negative input does not adjust the limit, and allows for retrieval of the currently set memory limit.

+

Parameters:

+
  • mem-limit - int
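For example, to suggest a soft limit of 1 GiB (1073741824 bytes, an illustrative value):

rclone rc debug/set-soft-memory-limit mem-limit=1073741824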

fscache/clear: Clear the Fs cache.

+

This clears the fs cache. This is where remotes created from backends are cached for a short while to make repeated rc calls more efficient.

+

If you change the parameters of a backend then you may want to call this to clear an existing remote out of the cache before re-creating it.

+

Authentication is required for this call.

+

fscache/entries: Returns the number of entries in the fs cache.

+

This returns the number of entries in the fs cache.

+

Returns

+
  • entries - number of items in the cache

Authentication is required for this call.

+

job/list: Lists the IDs of the running jobs

+

Parameters: None.

+

Results:

+
  • executeId - string id of rclone executing (changes after restart)
  • jobids - array of integer job ids (starting at 1 on each restart)

job/status: Reads the status of the job ID

+

Parameters:

+
  • jobid - id of the job (integer).

Results:

+
  • finished - boolean whether the job has finished or not
  • duration - time in seconds that the job ran for
  • endTime - time the job finished (e.g. "2018-10-26T18:50:20.528746884+01:00")
  • error - error from the job or empty string for no error
  • id - as passed in above
  • startTime - time the job started (e.g. "2018-10-26T18:50:20.528336039+01:00")
  • success - boolean - true for success false otherwise
  • output - output of the job as would have been returned if called synchronously
  • progress - output of the progress related to the underlying job
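An illustrative call and response for a finished job (the id and field values are examples only):

rclone rc job/status jobid=1
{
    "duration": 0.1,
    "endTime": "2018-10-27T11:38:07.911245881+01:00",
    "error": "",
    "finished": true,
    "id": 1,
    "startTime": "2018-10-27T11:38:07.911245881+01:00",
    "success": true
}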

job/stop: Stop the running job

+

Parameters:

+
  • jobid - id of the job (integer).
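E.g. (the job id is illustrative):

rclone rc job/stop jobid=1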

job/stopgroup: Stop all running jobs in a group

+

Parameters:

+
  • group - name of the group (string).

mount/listmounts: Show current mount points

+

This shows currently mounted points, which can be used for performing an unmount.

+

This takes no parameters and returns

+
  • mountPoints: list of current mount points

Eg

+
rclone rc mount/listmounts
+
+

Authentication is required for this call.

+

mount/mount: Create a new mount point

+

rclone allows Linux, FreeBSD, macOS and Windows to mount any of Rclone's cloud storage systems as a file system with FUSE.

+

If no mountType is provided, the priority is given as follows: 1. mount, 2. cmount, 3. mount2.

+

This takes the following parameters:

+
  • fs - a remote path to be mounted (required)
  • mountPoint: valid path on the local machine (required)
  • mountType: one of the values (mount, cmount, mount2) specifies the mount implementation to use
  • mountOpt: a JSON object with Mount options in.
  • vfsOpt: a JSON object with VFS options in.

Example:

+
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
+rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
+rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
+
+

The vfsOpt are as described in options/get and can be seen in the "vfs" section when running the command below, and the mountOpt can be seen in the "mount" section:

+
rclone rc options/get
+
+

Authentication is required for this call.

+

mount/types: Show all possible mount types

+

This shows all possible mount types and returns them as a list.

+

This takes no parameters and returns

+
  • mountTypes: list of mount types

The mount types are strings like "mount", "mount2", "cmount" and can be passed to mount/mount as the mountType parameter.

+

Eg

+
rclone rc mount/types
+
+

Authentication is required for this call.

+

mount/unmount: Unmount selected active mount

+

rclone allows Linux, FreeBSD, macOS and Windows to mount any of Rclone's cloud storage systems as a file system with FUSE.

+

This takes the following parameters:

+
  • mountPoint: valid path on the local machine where the mount was created (required)

Example:

+
rclone rc mount/unmount mountPoint=/home/<user>/mountPoint
+
+

Authentication is required for this call.

+

mount/unmountall: Unmount all active mounts

+

rclone allows Linux, FreeBSD, macOS and Windows to mount any of Rclone's cloud storage systems as a file system with FUSE.

+

This takes no parameters and returns error if unmount does not succeed.

+

Eg

+
rclone rc mount/unmountall
+
+

Authentication is required for this call.

+

operations/about: Return the space used on the remote

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

The result is as returned from rclone about --json

+

See the about command for more information on the above.

+

Authentication is required for this call.

+

operations/check: check the source and destination are the same

+

Checks the files in the source and destination match. It compares sizes and hashes and logs a report of files that don't match. It doesn't alter the source or destination.

+

This takes the following parameters:

+
  • srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
  • dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
  • download - check by downloading rather than with hash
  • checkFileHash - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
  • checkFileFs - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
  • checkFileRemote - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
  • oneWay - check one way only, source files must exist on remote
  • combined - make a combined report of changes (default false)
  • missingOnSrc - report all files missing from the source (default true)
  • missingOnDst - report all files missing from the destination (default true)
  • match - report all matching files (default false)
  • differ - report all non-matching files (default true)
  • error - report all files with errors (hashing or reading) (default true)

If you supply the download flag, it will download the data from both remotes and check them against each other on the fly. This can be useful for remotes that don't support hashes or if you really want to check all the data.

+

If you supply the size-only global flag, it will only compare the sizes not the hashes as well. Use this for a quick check.

+

If you supply the checkFileHash option with a valid hash name, the checkFileFs:checkFileRemote must point to a text file in the SUM format. This treats the checksum file as the source and dstFs as the destination. Note that srcFs is not used and should not be supplied in this case.

+

Returns:

+
  • success - true if no error, false otherwise
  • status - textual summary of check, OK or text string
  • hashType - hash used in check, may be missing
  • combined - array of strings of combined report of changes
  • missingOnSrc - array of strings of all files missing from the source
  • missingOnDst - array of strings of all files missing from the destination
  • match - array of strings of all matching files
  • differ - array of strings of all non-matching files
  • error - array of strings of all files with errors (hashing or reading)

Authentication is required for this call.

+

operations/cleanup: Remove trashed files in the remote or path

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

See the cleanup command for more information on the above.

+

Authentication is required for this call.

+

operations/copyfile: Copy a file from source remote to destination remote

+

This takes the following parameters:

+
  • srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
  • srcRemote - a path within that remote e.g. "file.txt" for the source
  • dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
  • dstRemote - a path within that remote e.g. "file2.txt" for the destination
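A sketch of a call, with illustrative remote names and paths:

rclone rc operations/copyfile srcFs=drive: srcRemote=file.txt dstFs=drive2: dstRemote=backup/file.txt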

Authentication is required for this call.

+

operations/copyurl: Copy the URL to the object

+

This takes the following parameters:

+
  • fs - a remote name string e.g. "drive:"
  • remote - a path within that remote e.g. "dir"
  • url - string, URL to read from
  • autoFilename - boolean, set to true to retrieve destination file name from url

See the copyurl command for more information on the above.

+

Authentication is required for this call.

+

operations/delete: Remove files in the path

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

See the delete command for more information on the above.

+

Authentication is required for this call.

+

operations/deletefile: Remove the single file pointed to

+

This takes the following parameters:

+
  • fs - a remote name string e.g. "drive:"
  • remote - a path within that remote e.g. "dir"

See the deletefile command for more information on the above.

+

Authentication is required for this call.

+

operations/fsinfo: Return information about the remote

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

This returns info about the remote passed in:

+
{
+        // optional features and whether they are available or not
+        "Features": {
+                "About": true,
+                "BucketBased": false,
+                "BucketBasedRootOK": false,
+                "CanHaveEmptyDirectories": true,
+                "CaseInsensitive": false,
+                "ChangeNotify": false,
+                "CleanUp": false,
+                "Command": true,
+                "Copy": false,
+                "DirCacheFlush": false,
+                "DirMove": true,
+                "Disconnect": false,
+                "DuplicateFiles": false,
+                "GetTier": false,
+                "IsLocal": true,
+                "ListR": false,
+                "MergeDirs": false,
+                "MetadataInfo": true,
+                "Move": true,
+                "OpenWriterAt": true,
+                "PublicLink": false,
+                "Purge": true,
+                "PutStream": true,
+                "PutUnchecked": false,
+                "ReadMetadata": true,
+                "ReadMimeType": false,
+                "ServerSideAcrossConfigs": false,
+                "SetTier": false,
+                "SetWrapper": false,
+                "Shutdown": false,
+                "SlowHash": true,
+                "SlowModTime": false,
+                "UnWrap": false,
+                "UserInfo": false,
+                "UserMetadata": true,
+                "WrapFs": false,
+                "WriteMetadata": true,
+                "WriteMimeType": false
+        },
+        // Names of hashes available
+        "Hashes": [
+                "md5",
+                "sha1",
+                "whirlpool",
+                "crc32",
+                "sha256",
+                "dropbox",
+                "mailru",
+                "quickxor"
+        ],
+        "Name": "local",        // Name as created
+        "Precision": 1,         // Precision of timestamps in ns
+        "Root": "/",            // Path as created
+        "String": "Local file system at /", // how the remote will appear in logs
+        // Information about the system metadata for this backend
+        "MetadataInfo": {
+                "System": {
+                        "atime": {
+                                "Help": "Time of last access",
+                                "Type": "RFC 3339",
+                                "Example": "2006-01-02T15:04:05.999999999Z07:00"
+                        },
+                        "btime": {
+                                "Help": "Time of file birth (creation)",
+                                "Type": "RFC 3339",
+                                "Example": "2006-01-02T15:04:05.999999999Z07:00"
+                        },
+                        "gid": {
+                                "Help": "Group ID of owner",
+                                "Type": "decimal number",
+                                "Example": "500"
+                        },
+                        "mode": {
+                                "Help": "File type and mode",
+                                "Type": "octal, unix style",
+                                "Example": "0100664"
+                        },
+                        "mtime": {
+                                "Help": "Time of last modification",
+                                "Type": "RFC 3339",
+                                "Example": "2006-01-02T15:04:05.999999999Z07:00"
+                        },
+                        "rdev": {
+                                "Help": "Device ID (if special file)",
+                                "Type": "hexadecimal",
+                                "Example": "1abc"
+                        },
+                        "uid": {
+                                "Help": "User ID of owner",
+                                "Type": "decimal number",
+                                "Example": "500"
+                        }
+                },
+                "Help": "Textual help string\n"
+        }
+}
+

This command does not have a command line equivalent so use this instead:

+
rclone rc --loopback operations/fsinfo fs=remote:
+
+

operations/hashsum: Produces a hashsum file for all the objects in the path.

+

Produces a hash file for all the objects in the path using the hash named. The output is in the same format as the standard md5sum/sha1sum tool.

+

This takes the following parameters:

+
  • fs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
    • this can point to a file and just that file will be returned in the listing.
  • hashType - type of hash to be used
  • download - check by downloading rather than with hash (boolean)
  • base64 - output the hashes in base64 rather than hex (boolean)

If you supply the download flag, it will download the data from the remote and create the hash on the fly. This can be useful for remotes that don't support the given hash or if you really want to check all the data.

+

Note that if you wish to supply a checkfile to check hashes against the current files then you should use operations/check instead of operations/hashsum.

+

Returns:

+
  • hashsum - array of strings of the hashes
  • hashType - type of hash used

Example:

+
$ rclone rc --loopback operations/hashsum fs=bin hashType=MD5 download=true base64=true
+{
+    "hashType": "md5",
+    "hashsum": [
+        "WTSVLpuiXyJO_kGzJerRLg==  backend-versions.sh",
+        "v1b_OlWCJO9LtNq3EIKkNQ==  bisect-go-rclone.sh",
+        "VHbmHzHh4taXzgag8BAIKQ==  bisect-rclone.sh",
+    ]
+}
+
+

See the hashsum command for more information on the above.

+

Authentication is required for this call.

+

operations/list: List the given remote and path in JSON format

+

This takes the following parameters:

+
  • fs - a remote name string e.g. "drive:"
  • remote - a path within that remote e.g. "dir"
  • opt - a dictionary of options to control the listing (optional)
    • recurse - If set recurse directories
    • noModTime - If set return modification time
    • showEncrypted - If set show decrypted names
    • showOrigIDs - If set show the IDs for each item if known
    • showHash - If set return a dictionary of hashes
    • noMimeType - If set don't show mime types
    • dirsOnly - If set only show directories
    • filesOnly - If set only show files
    • metadata - If set return metadata of objects also
    • hashTypes - array of strings of hash types to show if showHash set

Returns:

+
  • list
    • This is an array of objects as described in the lsjson command

See the lsjson command for more information on the above and examples.

+

Authentication is required for this call.

+

operations/mkdir: Make a destination directory or container

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the mkdir command for more information on the above.

+

Authentication is required for this call.

+

operations/movefile: Move a file from source remote to destination remote

+

This takes the following parameters:

+
  • srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
  • srcRemote - a path within that remote e.g. "file.txt" for the source
  • dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
  • dstRemote - a path within that remote e.g. "file2.txt" for the destination

Authentication is required for this call.

operations/publiclink: Create or retrieve a public link to the given file or folder

This takes the following parameters:

+
  • fs - a remote name string e.g. "drive:"
  • remote - a path within that remote e.g. "dir"
  • unlink - boolean - if set removes the link rather than adding it (optional)
  • expire - string - the expiry time of the link e.g. "1d" (optional)

Returns:

+
    +
  • url - URL of the resource
  • +
+

See the link command for more information on the above.

+

Authentication is required for this call.

+

operations/purge: Remove a directory or container and all of its contents

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the purge command for more information on the above.

+

Authentication is required for this call.

+

operations/rmdir: Remove an empty directory or container

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the rmdir command for more information on the above.

+

Authentication is required for this call.

+

operations/rmdirs: Remove all the empty directories in the path

+

This takes the following parameters:

+
  • fs - a remote name string e.g. "drive:"
  • remote - a path within that remote e.g. "dir"
  • leaveRoot - boolean, set to true not to delete the root

See the rmdirs command for more information on the above.

+

Authentication is required for this call.

+

operations/settier: Changes storage tier or class on all files in the path

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
+

See the settier command for more information on the above.

+

Authentication is required for this call.

+

operations/settierfile: Changes storage tier or class on the single file pointed to

+

This takes the following parameters:

+
    +
  • fs - a remote name string e.g. "drive:"
  • +
  • remote - a path within that remote e.g. "dir"
  • +
+

See the settierfile command for more information on the above.

+

Authentication is required for this call.

+

operations/size: Count the number of bytes and files in remote

+

This takes the following parameters:

+
  • fs - a remote name string e.g. "drive:path/to/dir"

Returns:

+
  • count - number of files
  • bytes - number of bytes in those files
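For example (the remote and the returned values are illustrative):

rclone rc operations/size fs=drive:path/to/dir
{
    "bytes": 67908,
    "count": 42
}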

See the size command for more information on the above.

+

Authentication is required for this call.

+

operations/stat: Give information about the supplied file or directory

+

This takes the following parameters

+
  • fs - a remote name string eg "drive:"
  • remote - a path within that remote eg "dir"
  • opt - a dictionary of options to control the listing (optional)
    • see operations/list for the options

The result is

+
  • item - an object as described in the lsjson command. Will be null if not found.
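An illustrative call on a single file (the names are examples):

rclone rc operations/stat fs=drive: remote=dir/file.txt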

Note that if you are only interested in files then it is much more efficient to set the filesOnly flag in the options.

+

See the lsjson command for more information on the above and examples.

+

Authentication is required for this call.

+

operations/uploadfile: Upload file using multipart/form-data

+

This takes the following parameters:

+
  • fs - a remote name string e.g. "drive:"
  • remote - a path within that remote e.g. "dir"
  • each part in body represents a file to be uploaded

See the uploadfile command for more information on the above.

+

Authentication is required for this call.

+

options/blocks: List all the option blocks

+

Returns:

+
    +
  • options - a list of the options block names
  • +
+

options/get: Get all the global options

+

Returns an object where keys are option block names and values are an object with the current option values in.

+

Parameters:

+
  • blocks: optional string of comma separated blocks to include
    • all are included if this is missing or ""

Note that these are the global options which are unaffected by use of the _config and _filter parameters. If you wish to read the parameters set in _config then use options/config and for _filter use options/filter.

+

This shows the internal names of the option within rclone which should map to the external options very easily with a few exceptions.

+

options/info: Get info about all the global options

+

Returns an object where keys are option block names and values are an array of objects with info about each option.

+

Parameters:

+
  • blocks: optional string of comma separated blocks to include
    • all are included if this is missing or ""

These objects are in the same format as returned by "config/providers". They are described in the option blocks section.

+

options/local: Get the currently active config for this call

+

Returns an object with the keys "config" and "filter". The "config" key contains the local config and the "filter" key contains the local filters.

+

Note that these are the local options specific to this rc call. If _config was not supplied then they will be the global options. Likewise with "_filter".

+

This call is mostly useful for seeing if _config and _filter passing is working.

+

This shows the internal names of the option within rclone which should map to the external options very easily with a few exceptions.

+

options/set: Set an option

+

Parameters:

+
  • option block name containing an object with
    • key: value

Repeated as often as required.

+

Only supply the options you wish to change. If an option is unknown it will be silently ignored. Not all options will have an effect when changed like this.

+

For example:

+

This sets DEBUG level logs (-vv) (these can be set by number or string)

+
rclone rc options/set --json '{"main": {"LogLevel": "DEBUG"}}'
+rclone rc options/set --json '{"main": {"LogLevel": 8}}'
+
+

And this sets INFO level logs (-v)

+
rclone rc options/set --json '{"main": {"LogLevel": "INFO"}}'
+
+

And this sets NOTICE level logs (normal without -v)

+
rclone rc options/set --json '{"main": {"LogLevel": "NOTICE"}}'
+
+

pluginsctl/addPlugin: Add a plugin using url

+

Used for adding a plugin to the webgui.

+

This takes the following parameters:

  • url - http url of the github repo where the plugin is hosted (e.g. https://github.com/rclone/rclone-webui-react)

Example:

+

rclone rc pluginsctl/addPlugin

+

Authentication is required for this call.

+

pluginsctl/getPluginsForType: Get plugins with type criteria

+

This shows all possible plugins by a mime type.

+

This takes the following parameters:

+
  • type - supported mime type by a loaded plugin e.g. (video/mp4, audio/mp3).
  • pluginType - filter plugins based on their type e.g. (DASHBOARD, FILE_HANDLER, TERMINAL).

Returns:

+
  • loadedPlugins - list of current production plugins.
  • testPlugins - list of temporarily loaded development plugins, usually running on a different server.

Example:

+

rclone rc pluginsctl/getPluginsForType type=video/mp4

+

Authentication is required for this call.

+

pluginsctl/listPlugins: Get the list of currently loaded plugins

+

This allows you to get the currently enabled plugins and their details.

+

This takes no parameters and returns:

+
  • loadedPlugins - list of current production plugins.
  • testPlugins - list of temporarily loaded development plugins, usually running on a different server.

E.g.

+

rclone rc pluginsctl/listPlugins

+

Authentication is required for this call.

+

pluginsctl/listTestPlugins: Show currently loaded test plugins

+

Allows listing of test plugins, i.e. plugins with rclone.test set to true in their package.json.

+

This takes no parameters and returns:

+
    +
  • loadedTestPlugins - list of currently available test plugins.
  • +
+

E.g.

+
rclone rc pluginsctl/listTestPlugins
+
+

Authentication is required for this call.

+

pluginsctl/removePlugin: Remove a loaded plugin

+

This allows you to remove a plugin using its name.

+

This takes parameters:

+
    +
  • name - name of the plugin in the format author/plugin_name.
  • +
+

E.g.

+

rclone rc pluginsctl/removePlugin name=rclone/video-plugin

+

Authentication is required for this call.

+

pluginsctl/removeTestPlugin: Remove a test plugin

+

This allows you to remove a plugin using its name.

+

This takes the following parameters:

+
    +
  • name - name of the plugin in the format author/plugin_name.
  • +
+

Example:

+
rclone rc pluginsctl/removeTestPlugin name=rclone/rclone-webui-react
+
+

Authentication is required for this call.

+

rc/error: This returns an error

+

This returns an error with the input as part of its error string. Useful for testing error handling.

+

rc/list: List all the registered remote control commands

+

This lists all the registered remote control commands as a JSON map in the commands response.

+

rc/noop: Echo the input to the output parameters

+

This echoes the input parameters to the output parameters for testing purposes. It can be used to check that rclone is still alive and to check that parameter passing is working properly.

+

rc/noopauth: Echo the input to the output parameters requiring auth

+

This echoes the input parameters to the output parameters for testing purposes. It can be used to check that rclone is still alive and to check that parameter passing is working properly.

+

Authentication is required for this call.

+

sync/bisync: Perform bidirectional synchronization between two paths.

+

This takes the following parameters

+
  • path1 - a remote directory string e.g. drive:path1
  • path2 - a remote directory string e.g. drive:path2
  • dryRun - dry-run mode
  • resync - performs the resync run
  • checkAccess - abort if RCLONE_TEST files are not found on both filesystems
  • checkFilename - file name for checkAccess (default: RCLONE_TEST)
  • maxDelete - abort sync if percentage of deleted files is above this threshold (default: 50)
  • force - Bypass maxDelete safety check and run the sync
  • checkSync - true by default, false disables comparison of final listings, only will skip sync and only compare listings from the last run
  • createEmptySrcDirs - Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
  • removeEmptyDirs - remove empty directories at the final cleanup step
  • filtersFile - read filtering patterns from a file
  • ignoreListingChecksum - Do not use checksums for listings
  • resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync. Use at your own risk!
  • workdir - server directory for history files (default: ~/.cache/rclone/bisync)
  • backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
  • backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
  • noCleanup - retain working files

See bisync command help and full bisync description for more information.

+

Authentication is required for this call.

+

sync/copy: copy a directory from source remote to destination remote

+

This takes the following parameters:

+
  • srcFs - a remote name string e.g. "drive:src" for the source
  • dstFs - a remote name string e.g. "drive:dst" for the destination
  • createEmptySrcDirs - create empty src directories on destination if set
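For example, to run the copy as a background job this can be combined with the generic _async parameter (the remote names are illustrative):

rclone rc sync/copy srcFs=drive:src dstFs=drive:dst _async=true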

See the copy command for more information on the above.

+

Authentication is required for this call.

+

sync/move: move a directory from source remote to destination remote

+

This takes the following parameters:

+
  • srcFs - a remote name string e.g. "drive:src" for the source
  • dstFs - a remote name string e.g. "drive:dst" for the destination
  • createEmptySrcDirs - create empty src directories on destination if set
  • deleteEmptySrcDirs - delete empty src directories if set

See the move command for more information on the above.

+

Authentication is required for this call.

+

sync/sync: sync a directory from source remote to destination remote

+

This takes the following parameters:

+
  • srcFs - a remote name string e.g. "drive:src" for the source
  • dstFs - a remote name string e.g. "drive:dst" for the destination
  • createEmptySrcDirs - create empty src directories on destination if set

See the sync command for more information on the above.

+

Authentication is required for this call.

+

vfs/forget: Forget files or directories in the directory cache.

+

This forgets the paths in the directory cache causing them to be re-read from the remote when needed.

+

If no paths are passed in then it will forget all the paths in the directory cache.

+
rclone rc vfs/forget
+
+

Otherwise pass files or dirs in as file=path or dir=path. Any parameter key starting with file will forget that file and any starting with dir will forget that dir, e.g.

+
rclone rc vfs/forget file=hello file2=goodbye dir=home/junk
+
+

This command takes an "fs" parameter. If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. If there is more than one VFS in use then the "fs" parameter must be supplied.

+

vfs/list: List active VFSes.

+

This lists the active VFSes.

+

It returns a list under the key "vfses" where the values are the VFS names that could be passed to the other VFS commands in the "fs" parameter.

+

vfs/poll-interval: Get the status or update the value of the poll-interval option.

+

Without any parameter given this returns the current status of the poll-interval setting.

+

When the interval=duration parameter is set, the poll-interval value is updated and the polling function is notified. Setting interval=0 disables poll-interval.

+
rclone rc vfs/poll-interval interval=5m
+
+

The timeout=duration parameter can be used to specify a time to wait for the current poll function to apply the new value. If timeout is less than or equal to 0, which is the default, wait indefinitely.

+

The new poll-interval value will only be active when the timeout is not reached.

+

If poll-interval is updated or disabled temporarily, some changes might not get picked up by the polling function, depending on the used remote.

+

This command takes an "fs" parameter. If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. If there is more than one VFS in use then the "fs" parameter must be supplied.

+

vfs/queue: Queue info for a VFS.

+

This returns info about the upload queue for the selected VFS.

+

This is only useful if --vfs-cache-mode > off. If you call it when the --vfs-cache-mode is off, it will return an empty result.

+
{
+    "queued": // an array of files queued for upload
+    [
+        {
+            "name":      "file",   // string: name (full path) of the file,
+            "id":        123,      // integer: id of this item in the queue,
+            "size":      79,       // integer: size of the file in bytes
+            "expiry":    1.5       // float: time until file is eligible for transfer, lowest goes first
+            "tries":     1,        // integer: number of times we have tried to upload
+            "delay":     5.0,      // float: seconds between upload attempts
+            "uploading": false,    // boolean: true if item is being uploaded
+        },
+   ],
+}
+
+

The expiry time is the time until the file is eligible for being uploaded in floating point seconds. This may go negative. As rclone only transfers --transfers files at once, only the lowest --transfers expiry times will have uploading as true. So there may be files with negative expiry times for which uploading is false.

+

This command takes an "fs" parameter. If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. If there is more than one VFS in use then the "fs" parameter must be supplied.

+

vfs/queue-set-expiry: Set the expiry time for an item queued for upload.

+

Use this to adjust the expiry time for an item in the upload queue. You will need to read the id of the item using vfs/queue before using this call.

+

You can then set expiry to a floating point number of seconds from now when the item is eligible for upload. If you want the item to be uploaded as soon as possible then set it to a large negative number (eg -1000000000). If you want the upload of the item to be delayed for a long time then set it to a large positive number.

+

Setting the expiry of an item which has already started uploading will have no effect - the item will carry on being uploaded.

+

This will return an error if called with --vfs-cache-mode off or if the id passed is not found.

+

This takes the following parameters

+
  • fs - select the VFS in use (optional)
  • id - a numeric ID as returned from vfs/queue
  • expiry - a new expiry time as floating point seconds
  • relative - if set, expiry is to be treated as relative to the current expiry (optional, boolean)
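For example, to push an item to the front of the queue (the id is illustrative):

rclone rc vfs/queue-set-expiry id=123 expiry=-1000000000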

This returns an empty result on success, or an error.

+

This command takes an "fs" parameter. If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. If there is more than one VFS in use then the "fs" parameter must be supplied.

+

vfs/refresh: Refresh the directory cache.

+

This reads the directories for the specified paths and freshens the directory cache.

+

If no paths are passed in then it will refresh the root directory.

+
rclone rc vfs/refresh
+
+

Otherwise pass directories in as dir=path. Any parameter key starting with dir will refresh that directory, e.g.

+
rclone rc vfs/refresh dir=home/junk dir2=data/misc
+
+

If the parameter recursive=true is given the whole directory tree will get refreshed. This refresh will use --fast-list if enabled.

+

This command takes an "fs" parameter. If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. If there is more than one VFS in use then the "fs" parameter must be supplied.

+

vfs/stats: Stats for a VFS.

+

This returns stats for the selected VFS.

+
{
+    // Status of the disk cache - only present if --vfs-cache-mode > off
+    "diskCache": {
+        "bytesUsed": 0,
+        "erroredFiles": 0,
+        "files": 0,
+        "hashType": 1,
+        "outOfSpace": false,
+        "path": "/home/user/.cache/rclone/vfs/local/mnt/a",
+        "pathMeta": "/home/user/.cache/rclone/vfsMeta/local/mnt/a",
+        "uploadsInProgress": 0,
+        "uploadsQueued": 0
+    },
+    "fs": "/mnt/a",
+    "inUse": 1,
+    // Status of the in memory metadata cache
+    "metadataCache": {
+        "dirs": 1,
+        "files": 0
+    },
+    // Options as returned by options/get
+    "opt": {
+        "CacheMaxAge": 3600000000000,
+        // ...
+        "WriteWait": 1000000000
+    }
+}
+
+

This command takes an "fs" parameter. If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. If there is more than one VFS in use then the "fs" parameter must be supplied.

+ +

Accessing the remote control via HTTP

+

Rclone implements a simple HTTP based protocol.

+

Each endpoint takes a JSON object and returns a JSON object or an error. The JSON objects are essentially a map of string names to values.

+

All calls must be made using POST.

+

The input objects can be supplied using URL parameters, POST parameters or by supplying "Content-Type: application/json" and a JSON blob in the body. There are examples of these below using curl.

+

The response will be a JSON blob in the body of the response. This is formatted to be reasonably human-readable.

+

Error returns

+

If an error occurs then there will be an HTTP error status (e.g. 500) and the body of the response will contain a JSON encoded error object, e.g.

+
{
+    "error": "Expecting string value for key \"remote\" (was float64)",
+    "input": {
+        "fs": "/tmp",
+        "remote": 3
+    },
+    "path": "operations/rmdir",
+    "status": 400
+}
+

The keys in the error response are

+
  • error - error string
  • input - the input parameters to the call
  • status - the HTTP status code
  • path - the path of the call

CORS

+

The server implements basic CORS support and allows all origins for that. The response to a preflight OPTIONS request will echo the requested "Access-Control-Request-Headers" back.

+

Using POST with URL parameters only

+
curl -X POST 'http://localhost:5572/rc/noop?potato=1&sausage=2'
+

Response

+
{
+	"potato": "1",
+	"sausage": "2"
+}
+

Here is what an error response looks like:

+
curl -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
+
{
+	"error": "arbitrary error on input map[potato:1 sausage:2]",
+	"input": {
+		"potato": "1",
+		"sausage": "2"
+	}
+}
+

Note that curl doesn't return errors to the shell unless you use the -f option

+
$ curl -f -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
+curl: (22) The requested URL returned error: 400 Bad Request
+$ echo $?
+22
+

Using POST with a form

+
curl --data "potato=1" --data "sausage=2" http://localhost:5572/rc/noop
+

Response

+
{
+	"potato": "1",
+	"sausage": "2"
+}
+

Note that you can combine these with URL parameters too with the POST parameters taking precedence.

+
curl --data "potato=1" --data "sausage=2" "http://localhost:5572/rc/noop?rutabaga=3&sausage=4"
+

Response

+
{
+	"potato": "1",
+	"rutabaga": "3",
+	"sausage": "4"
+}
+

Using POST with a JSON blob

+
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' http://localhost:5572/rc/noop
+

Response

+
{
+	"potato": 2,
+	"sausage": 1
+}
+

This can be combined with URL parameters too if required. The JSON blob takes precedence.

+
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' 'http://localhost:5572/rc/noop?rutabaga=3&potato=4'
+
{
+	"potato": 2,
+	"rutabaga": "3",
+	"sausage": 1
+}
+

Debugging rclone with pprof

+

If you use the --rc flag this will also enable the use of the go profiling tools on the same port.

+

To use these, first install go.

+

Debugging memory use

+

To profile rclone's memory use you can run:

+
go tool pprof -web http://localhost:5572/debug/pprof/heap
+
+

This should open a page in your browser showing what is using what memory.

+

You can also use the -text flag to produce a textual summary

+
$ go tool pprof -text http://localhost:5572/debug/pprof/heap
+Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
+      flat  flat%   sum%        cum   cum%
+ 1024.03kB 66.62% 66.62%  1024.03kB 66.62%  github.com/rclone/rclone/vendor/golang.org/x/net/http2/hpack.addDecoderNode
+     513kB 33.38%   100%      513kB 33.38%  net/http.newBufioWriterSize
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/cmd/all.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/cmd/serve.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/cmd/serve/restic.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/vendor/golang.org/x/net/http2.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/vendor/golang.org/x/net/http2/hpack.init
+         0     0%   100%  1024.03kB 66.62%  github.com/rclone/rclone/vendor/golang.org/x/net/http2/hpack.init.0
+         0     0%   100%  1024.03kB 66.62%  main.init
+         0     0%   100%      513kB 33.38%  net/http.(*conn).readRequest
+         0     0%   100%      513kB 33.38%  net/http.(*conn).serve
+         0     0%   100%  1024.03kB 66.62%  runtime.main
+

Debugging go routine leaks

+

Memory leaks are most often caused by go routine leaks keeping memory alive which should have been garbage collected.

+

See all active go routines using

+
curl http://localhost:5572/debug/pprof/goroutine?debug=1
+
+

Or go to http://localhost:5572/debug/pprof/goroutine?debug=1 in your browser.

+

Other profiles to look at

+

You can see a summary of profiles available at http://localhost:5572/debug/pprof/

+

Here is how to use some of them:

+
  • Memory: go tool pprof http://localhost:5572/debug/pprof/heap
  • Go routines: curl http://localhost:5572/debug/pprof/goroutine?debug=1
  • 30-second CPU profile: go tool pprof http://localhost:5572/debug/pprof/profile
  • 5-second execution trace: wget http://localhost:5572/debug/pprof/trace?seconds=5
  • Goroutine blocking profile
    • Enable first with: rclone rc debug/set-block-profile-rate rate=1 (docs)
    • go tool pprof http://localhost:5572/debug/pprof/block
  • Contended mutexes:
    • Enable first with: rclone rc debug/set-mutex-profile-fraction rate=1 (docs)
    • go tool pprof http://localhost:5572/debug/pprof/mutex

See the net/http/pprof docs for more info on how to use the profiling and for a general overview see the Go team's blog post on profiling go programs.

+

The profiling hook is zero overhead unless it is used.

+ + + + + + + + \ No newline at end of file diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts index 74e68b5853..157ab5f505 100644 --- a/api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone-api.service.ts @@ -5,18 +5,27 @@ import { existsSync } from 'node:fs'; import { mkdir, rm, writeFile } from 'node:fs/promises'; import { dirname, join } from 'node:path'; +import { convert } from 'convert'; import { execa } from 'execa'; import got, { HTTPError } from 'got'; import pRetry from 'p-retry'; import { sanitizeParams } from '@app/core/log.js'; +import { + getConfigIdFromGroupId, + isBackupJobGroup, +} from '@app/unraid-api/graph/resolvers/backup/backup.utils.js'; +import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js'; +import { RCloneStatusService } from '@app/unraid-api/graph/resolvers/rclone/rclone-status.service.js'; import { CreateRCloneRemoteDto, DeleteRCloneRemoteDto, GetRCloneJobStatusDto, GetRCloneRemoteConfigDto, GetRCloneRemoteDetailsDto, - RCloneProviderOptionResponse, + RCloneJob, + RCloneJobListResponse, + RCloneJobStats, RCloneProviderResponse, RCloneRemoteConfig, RCloneStartBackupInput, @@ -24,72 +33,109 @@ import { } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; import { validateObject } from '@app/unraid-api/graph/resolvers/validation.utils.js'; +// Constants for the service +const CONSTANTS = { + LOG_LEVEL: { + DEBUG: 'DEBUG', + INFO: 'INFO', + }, + RETRY_CONFIG: { + retries: 6, + minTimeout: 100, + maxTimeout: 5000, + factor: 2, + maxRetryTime: 30000, + }, + TIMEOUTS: { + GRACEFUL_SHUTDOWN: 2000, + PROCESS_CLEANUP: 1000, + }, +}; + +// Internal interface for job status response from RClone API +interface RCloneJobStatusResponse { + id?: string | number; + group?: string; + stats?: RCloneJobStats; + finished?: boolean; + error?: string; + [key: string]: any; +} + +interface BackupStatusResult { + isRunning: boolean; + stats: RCloneJobStats | null; + jobCount: number; + activeJobs: RCloneJobStatusResponse[]; +} + +interface JobOperationResult { + stopped: string[]; + forgotten?: string[]; + errors: string[]; +} + @Injectable() export class RCloneApiService implements OnModuleInit, OnModuleDestroy { - private isInitialized: boolean = false; + private initialized: boolean = false; private readonly logger = new Logger(RCloneApiService.name); private rcloneSocketPath: string = ''; private rcloneBaseUrl: string = ''; private rcloneProcess: ChildProcess | null = null; private readonly rcloneUsername: string = - process.env.RCLONE_USERNAME || crypto.randomBytes(12).toString('base64'); + process.env.RCLONE_USERNAME || + (process.env.NODE_ENV === 'test' ? 'test-user' : crypto.randomBytes(12).toString('hex')); private readonly rclonePassword: string = - process.env.RCLONE_PASSWORD || crypto.randomBytes(24).toString('base64'); - constructor() {} + process.env.RCLONE_PASSWORD || + (process.env.NODE_ENV === 'test' ? 
'test-pass' : crypto.randomBytes(24).toString('hex')); - /** - * Returns whether the RClone service is initialized and ready to use - */ - get initialized(): boolean { - return this.isInitialized; + constructor(private readonly statusService: RCloneStatusService) {} + + get isInitialized(): boolean { + return this.initialized; } async onModuleInit(): Promise { - try { - // Check if rclone binary is available first - const isBinaryAvailable = await this.checkRcloneBinaryExists(); - if (!isBinaryAvailable) { - this.logger.warn('RClone binary not found on system, skipping initialization'); - this.isInitialized = false; - return; - } + // Check if rclone binary is available first + const isBinaryAvailable = await this.checkRcloneBinaryExists(); + if (!isBinaryAvailable) { + this.logger.warn('RClone binary not found on system, skipping initialization'); + this.initialized = false; + return; + } - const { getters } = await import('@app/store/index.js'); - // Check if Rclone Socket is running, if not, start it. - this.rcloneSocketPath = getters.paths()['rclone-socket']; - const logFilePath = join(getters.paths()['log-base'], 'rclone-unraid-api.log'); - this.logger.log(`RClone socket path: ${this.rcloneSocketPath}`); - this.logger.log(`RClone log file path: ${logFilePath}`); + const { getters } = await import('@app/store/index.js'); + // Check if Rclone Socket is running, if not, start it. + this.rcloneSocketPath = getters.paths()['rclone-socket']; + const logFilePath = join(getters.paths()['log-base'], 'rclone-unraid-api.log'); + this.logger.log(`RClone socket path: ${this.rcloneSocketPath}`); + this.logger.log(`RClone log file path: ${logFilePath}`); - // Format the base URL for Unix socket - this.rcloneBaseUrl = `http://unix:${this.rcloneSocketPath}:`; + // Format the base URL for Unix socket + this.rcloneBaseUrl = `http://unix:${this.rcloneSocketPath}:`; - // Check if the RClone socket exists, if not, create it. - const socketExists = await this.checkRcloneSocketExists(this.rcloneSocketPath); + // Check if the RClone socket exists, if not, create it. + const socketExists = await this.checkRcloneSocketExists(this.rcloneSocketPath); - if (socketExists) { - const isRunning = await this.checkRcloneSocketRunning(); - if (isRunning) { - this.isInitialized = true; - return; - } else { - this.logger.warn( - 'RClone socket is not running but socket exists, removing socket before starting...' - ); - await rm(this.rcloneSocketPath, { force: true }); - } - - this.logger.warn('RClone socket is not running, starting it...'); - this.isInitialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath); + if (socketExists) { + const isRunning = await this.checkRcloneSocketRunning(); + if (isRunning) { + this.initialized = true; return; } else { - this.logger.warn('RClone socket does not exist, creating it...'); - this.isInitialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath); - return; + this.logger.warn( + 'RClone socket is not running but socket exists, removing socket before starting...' 
+ ); + await rm(this.rcloneSocketPath, { force: true }); } - } catch (error: unknown) { - this.logger.error(`Error initializing RCloneApiService: ${error}`); - this.isInitialized = false; + + this.logger.warn('RClone socket is not running, starting it...'); + this.initialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath); + return; + } else { + this.logger.warn('RClone socket does not exist, creating it...'); + this.initialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath); + return; } } @@ -98,95 +144,145 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { this.logger.log('RCloneApiService module destroyed'); } - /** - * Starts the RClone RC daemon on the specified socket path - */ + private async initializeRCloneService(): Promise { + const { getters } = await import('@app/store/index.js'); + this.rcloneSocketPath = getters.paths()['rclone-socket']; + const logFilePath = join(getters.paths()['log-base'], 'rclone-unraid-api.log'); + + this.rcloneBaseUrl = `http://unix:${this.rcloneSocketPath}:`; + this.logger.log( + `Ensuring RClone is stopped and socket is clean before initialization. Socket path: ${this.rcloneSocketPath}` + ); + + // Stop any existing rclone instances and remove the socket file. + await this.stopRcloneSocket(); + + this.logger.warn('Proceeding to start new RClone socket...'); + this.initialized = await this.startRcloneSocket(this.rcloneSocketPath, logFilePath); + } + private async startRcloneSocket(socketPath: string, logFilePath: string): Promise { try { - // Make log file exists - if (!existsSync(logFilePath)) { - this.logger.debug(`Creating log file: ${logFilePath}`); - await mkdir(dirname(logFilePath), { recursive: true }); - await writeFile(logFilePath, '', 'utf-8'); - } + await this.ensureLogFileExists(logFilePath); + + const rcloneArgs = this.buildRcloneArgs(socketPath, logFilePath); this.logger.log(`Starting RClone RC daemon on socket: ${socketPath}`); - // Start the process but don't wait for it to finish - this.rcloneProcess = execa( - 'rclone', - [ - 'rcd', - '--rc-addr', - socketPath, - '--log-level', - 'INFO', - '--log-file', - logFilePath, - ...(this.rcloneUsername ? ['--rc-user', this.rcloneUsername] : []), - ...(this.rclonePassword ? 
['--rc-pass', this.rclonePassword] : []), - ], - { detached: false } // Keep attached to manage lifecycle - ); - // Handle potential errors during process spawning (e.g., command not found) - this.rcloneProcess.on('error', (error: Error) => { - this.logger.error(`RClone process failed to start: ${error.message}`); - this.rcloneProcess = null; // Clear the handle on error - this.isInitialized = false; - }); + const rcloneProcessExecution = execa('rclone', rcloneArgs, { detached: false }); + this.rcloneProcess = rcloneProcessExecution; + this.setupProcessListeners(); - // Handle unexpected exit - this.rcloneProcess.on('exit', (code, signal) => { - this.logger.warn( - `RClone process exited unexpectedly with code: ${code}, signal: ${signal}` + rcloneProcessExecution.catch((error) => { + this.logger.debug( + `Rclone process execution promise rejected (expected if process failed to start or exited prematurely): ${ + error.shortMessage || error.message + }` ); - this.rcloneProcess = null; - this.isInitialized = false; }); - // Wait for socket to be ready using p-retry with exponential backoff - await pRetry( - async () => { - const isRunning = await this.checkRcloneSocketRunning(); - if (!isRunning) throw new Error('Rclone socket not ready'); - }, - { - retries: 6, // 7 attempts total - minTimeout: 100, - maxTimeout: 5000, - factor: 2, - maxRetryTime: 30000, - } - ); - + await this.waitForSocketReady(); + this.logger.log('RClone RC daemon started and socket is ready.'); return true; } catch (error: unknown) { - this.logger.error(`Error starting RClone RC daemon: ${error}`); - this.rcloneProcess?.kill(); // Attempt to kill if started but failed later - this.rcloneProcess = null; + this.logger.error(`Error during RClone RC daemon startup sequence: ${error}`); + this.cleanupFailedProcess(); return false; } } + private async ensureLogFileExists(logFilePath: string): Promise { + if (!existsSync(logFilePath)) { + await mkdir(dirname(logFilePath), { recursive: true }); + await writeFile(logFilePath, '', 'utf-8'); + } + } + + private buildRcloneArgs(socketPath: string, logFilePath: string): string[] { + // Unix sockets don't require HTTP authentication - the socket itself provides security + const isUnixSocket = socketPath.startsWith('/'); + + if (isUnixSocket) { + this.logger.log('Using Unix socket - HTTP authentication not required, using --rc-no-auth'); + } else { + this.logger.log( + `Building RClone args with username: ${this.rcloneUsername ? '[SET]' : '[NOT SET]'}, password: ${this.rclonePassword ? '[SET]' : '[NOT SET]'}` + ); + } + + const args = [ + 'rcd', + '--rc-addr', + socketPath, + '--log-level', + 'INFO', + '--log-file', + logFilePath, + // For Unix sockets, use --rc-no-auth instead of credentials + ...(isUnixSocket ? ['--rc-no-auth'] : []), + // Only add authentication for non-Unix socket connections + ...(!isUnixSocket && this.rcloneUsername ? ['--rc-user', this.rcloneUsername] : []), + ...(!isUnixSocket && this.rclonePassword ? 
['--rc-pass', this.rclonePassword] : []), + ]; + + this.logger.log(`RClone command args: ${args.join(' ')}`); + return args; + } + + private setupProcessListeners(): void { + if (!this.rcloneProcess) return; + + this.rcloneProcess.on('error', (error: Error) => { + this.logger.error(`RClone process failed to start: ${error.message}`); + this.cleanupFailedProcess(); + }); + + this.rcloneProcess.on('exit', (code, signal) => { + this.logger.warn(`RClone process exited unexpectedly with code: ${code}, signal: ${signal}`); + this.cleanupFailedProcess(); + }); + } + + private cleanupFailedProcess(): void { + this.rcloneProcess = null; + this.initialized = false; + } + + private async waitForSocketReady(): Promise { + await pRetry(async () => { + const isRunning = await this.checkRcloneSocketRunning(); + if (!isRunning) throw new Error('Rclone socket not ready'); + }, CONSTANTS.RETRY_CONFIG); + } + private async stopRcloneSocket(): Promise { if (this.rcloneProcess && !this.rcloneProcess.killed) { - this.logger.log(`Stopping RClone RC daemon process (PID: ${this.rcloneProcess.pid})...`); - try { - const killed = this.rcloneProcess.kill('SIGTERM'); // Send SIGTERM first - if (!killed) { - this.logger.warn('Failed to kill RClone process with SIGTERM, trying SIGKILL.'); - this.rcloneProcess.kill('SIGKILL'); // Force kill if SIGTERM failed - } - this.logger.log('RClone process stopped.'); - } catch (error: unknown) { - this.logger.error(`Error stopping RClone process: ${error}`); - } finally { - this.rcloneProcess = null; // Clear the handle + await this.terminateProcess(); + } + + await this.killExistingRcloneProcesses(); + await this.removeSocketFile(); + } + + private async terminateProcess(): Promise { + if (!this.rcloneProcess) return; + + this.logger.log(`Stopping RClone RC daemon process (PID: ${this.rcloneProcess.pid})...`); + + try { + const killed = this.rcloneProcess.kill('SIGTERM'); + if (!killed) { + this.logger.warn('Failed to kill with SIGTERM, using SIGKILL'); + this.rcloneProcess.kill('SIGKILL'); } - } else { - this.logger.log('RClone process not running or already stopped.'); + this.logger.log('RClone process stopped'); + } catch (error: unknown) { + this.logger.error(`Error stopping RClone process: ${error}`); + } finally { + this.rcloneProcess = null; } + } - // Clean up the socket file if it exists + private async removeSocketFile(): Promise { if (this.rcloneSocketPath && existsSync(this.rcloneSocketPath)) { this.logger.log(`Removing RClone socket file: ${this.rcloneSocketPath}`); try { @@ -197,36 +293,19 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { } } - /** - * Checks if the RClone socket exists - */ private async checkRcloneSocketExists(socketPath: string): Promise { const socketExists = existsSync(socketPath); if (!socketExists) { this.logger.warn(`RClone socket does not exist at: ${socketPath}`); - return false; } - return true; + return socketExists; } - /** - * Checks if the RClone socket is running - */ private async checkRcloneSocketRunning(): Promise { try { - // A simple API call to check if the daemon is responsive await this.callRcloneApi('core/pid'); - this.logger.debug('RClone socket is running and responsive.'); return true; - } catch (error: unknown) { - // Silently handle socket connection errors during checks - if (error instanceof Error) { - if (error.message.includes('ENOENT') || error.message.includes('ECONNREFUSED')) { - this.logger.debug('RClone socket not accessible - daemon likely not running'); - } else { - 
this.logger.debug(`RClone socket check failed: ${error.message}`); - } - } + } catch { return false; } } @@ -267,18 +346,11 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { return response?.remotes || []; } - /** - * Get complete remote details - */ async getRemoteDetails(input: GetRCloneRemoteDetailsDto): Promise { await validateObject(GetRCloneRemoteDetailsDto, input); - const config = (await this.getRemoteConfig({ name: input.name })) || {}; - return config as RCloneRemoteConfig; + return this.getRemoteConfig({ name: input.name }); } - /** - * Get configuration of a remote - */ async getRemoteConfig(input: GetRCloneRemoteConfigDto): Promise { await validateObject(GetRCloneRemoteConfigDto, input); return this.callRcloneApi('config/get', { name: input.name }); @@ -300,77 +372,329 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { return result; } - /** - * Update an existing remote configuration - */ - async updateRemote(input: UpdateRCloneRemoteDto): Promise { + async updateRemote(input: UpdateRCloneRemoteDto): Promise { await validateObject(UpdateRCloneRemoteDto, input); this.logger.log(`Updating remote: ${input.name}`); - const params = { + + return this.callRcloneApi('config/update', { name: input.name, ...input.parameters, - }; - return this.callRcloneApi('config/update', params); + }); } - /** - * Delete a remote configuration - */ - async deleteRemote(input: DeleteRCloneRemoteDto): Promise { + async deleteRemote(input: DeleteRCloneRemoteDto): Promise { await validateObject(DeleteRCloneRemoteDto, input); this.logger.log(`Deleting remote: ${input.name}`); return this.callRcloneApi('config/delete', { name: input.name }); } - /** - * Start a backup operation using sync/copy - * This copies a directory from source to destination - */ - async startBackup(input: RCloneStartBackupInput): Promise { + async startBackup(input: RCloneStartBackupInput): Promise { await validateObject(RCloneStartBackupInput, input); - this.logger.log(`Starting backup from ${input.srcPath} to ${input.dstPath}`); + + this.logger.log(`Starting backup: ${input.srcPath} → ${input.dstPath}`); + + const group = input.configId ? 
getConfigIdFromGroupId(input.configId) : 'manual'; + const params = { srcFs: input.srcPath, dstFs: input.dstPath, + ...(input.async && { _async: input.async }), + _group: group, ...(input.options || {}), }; - return this.callRcloneApi('sync/copy', params); + + const result = await this.callRcloneApi('sync/copy', params); + const jobId = result.jobid || result.jobId || 'unknown'; + this.logger.log(`Backup job created with ID: ${jobId} in group: ${group}`); + + return result; } /** - * Get the status of a running job + * Gets enhanced job status with computed fields */ - async getJobStatus(input: GetRCloneJobStatusDto): Promise { - await validateObject(GetRCloneJobStatusDto, input); - return this.callRcloneApi('job/status', { jobid: input.jobId }); + async getEnhancedJobStatus(jobId: string, configId?: string): Promise { + try { + await validateObject(GetRCloneJobStatusDto, { jobId }); + + if (isBackupJobGroup(jobId)) { + try { + const stats = await this.callRcloneApi('core/stats', { group: jobId }); + const enhancedStats = this.statusService.enhanceStatsWithFormattedFields({ + ...stats, + group: jobId, + }); + + const job = this.statusService.transformStatsToJob(jobId, enhancedStats); + job.configId = configId || getConfigIdFromGroupId(jobId); + + // Add computed fields + job.isRunning = job.status === BackupJobStatus.RUNNING; + job.errorMessage = job.error || undefined; + + return job; + } catch (error) { + this.logger.warn(`Failed to get group stats for ${jobId}: ${error}`); + } + } + + // Fallback to individual job status + const jobStatus = await this.getIndividualJobStatus(jobId); + const enhancedStats = jobStatus.stats + ? this.statusService.enhanceStatsWithFormattedFields(jobStatus.stats) + : {}; + + const job = this.statusService.transformStatsToJob(jobId, enhancedStats); + + // Add computed fields + job.isRunning = job.status === BackupJobStatus.RUNNING; + job.errorMessage = job.error || undefined; + + // Add configId if provided + if (configId) { + job.configId = configId; + } + + return job; + } catch (error) { + this.logger.error(`Failed to fetch enhanced job status for ${jobId}: %o`, error); + return null; + } } - /** - * List all running jobs - */ - async listRunningJobs(): Promise { + async getJobStatus(input: GetRCloneJobStatusDto): Promise { + const enhancedJob = await this.getEnhancedJobStatus(input.jobId); + if (enhancedJob) { + return enhancedJob; + } + + // Final fallback + const jobStatus = await this.getIndividualJobStatus(input.jobId); + return this.statusService.parseJobWithStats(input.jobId, jobStatus); + } + + async getIndividualJobStatus(jobId: string): Promise { + this.logger.debug(`Fetching status for job ${jobId}`); + const result = await this.callRcloneApi('job/status', { jobid: jobId }); + + if (result.error) { + this.logger.warn(`Job ${jobId} has error: ${result.error}`); + } + + return result; + } + + async listRunningJobs(): Promise { + this.logger.debug('Fetching job list from RClone API'); return this.callRcloneApi('job/list'); } - /** - * Generic method to call the RClone RC API - */ - private async callRcloneApi(endpoint: string, params: Record = {}): Promise { - const url = `${this.rcloneBaseUrl}/${endpoint}`; + async getAllJobsWithStats(): Promise { try { - this.logger.debug( - `Calling RClone API: ${url} with params: ${JSON.stringify(sanitizeParams(params))}` + // Get both the job list and group list + const [runningJobs, groupList] = await Promise.all([ + this.listRunningJobs(), + this.callRcloneApi('core/group-list'), + ]); + + 
this.logger.debug(`Running jobs: ${JSON.stringify(runningJobs)}`); + this.logger.debug(`Group list: ${JSON.stringify(groupList)}`); + + // Safety check: if too many groups, something is wrong + if (groupList.groups && groupList.groups.length > 100) { + this.logger.error( + `DANGER: Found ${groupList.groups.length} groups, aborting to prevent job explosion` + ); + return []; + } + + // Safety check: if too many individual jobs, something is wrong + if (runningJobs.jobids && runningJobs.jobids.length > 1000) { + this.logger.error( + `DANGER: Found ${runningJobs.jobids.length} individual jobs, aborting to prevent performance issues` + ); + return []; + } + + if (!runningJobs.jobids?.length) { + this.logger.debug('No running jobs found'); + return []; + } + + const backupGroups = (groupList.groups || []).filter((group: string) => + isBackupJobGroup(group) + ); + + if (backupGroups.length === 0) { + this.logger.debug('No backup groups found'); + return []; + } + + // Get group stats for all backup groups to get proper stats and group info + const groupStatsMap = new Map(); + await Promise.all( + backupGroups.map(async (group: string) => { + try { + const stats = await this.callRcloneApi('core/stats', { group }); + groupStatsMap.set(group, stats); + } catch (error) { + this.logger.warn(`Failed to get stats for group ${group}: ${error}`); + } + }) ); - const response = await got.post(url, { - json: params, - responseType: 'json', - enableUnixSockets: true, - headers: { - Authorization: `Basic ${Buffer.from(`${this.rcloneUsername}:${this.rclonePassword}`).toString('base64')}`, - }, + const jobs: RCloneJob[] = []; + + // For each backup group, create a job entry with proper stats + backupGroups.forEach((group) => { + const groupStats = groupStatsMap.get(group); + if (!groupStats) return; + + this.logger.debug(`Processing group ${group}: stats=${JSON.stringify(groupStats)}`); + + const extractedConfigId = getConfigIdFromGroupId(group); + + const enhancedStats = this.statusService.enhanceStatsWithFormattedFields({ + ...groupStats, + group, + }); + + const job = this.statusService.transformStatsToJob(group, enhancedStats); + job.configId = extractedConfigId; + + // Only include jobs that are truly active (not completed) + const isActivelyTransferring = groupStats.transferring?.length > 0; + const isActivelyChecking = groupStats.checking?.length > 0; + const hasActiveSpeed = groupStats.speed > 0; + const isNotFinished = !groupStats.finished && groupStats.fatalError !== true; + + if ((isActivelyTransferring || isActivelyChecking || hasActiveSpeed) && isNotFinished) { + jobs.push(job); + } }); + this.logger.debug( + `Found ${jobs.length} active backup jobs from ${backupGroups.length} groups` + ); + return jobs; + } catch (error) { + this.logger.error('Failed to get jobs with stats:', error); + return []; + } + } + + async stopAllJobs(): Promise { + const runningJobs = await this.listRunningJobs(); + + if (!runningJobs.jobids?.length) { + this.logger.log('No running jobs to stop'); + return { stopped: [], errors: [] }; + } + + this.logger.log(`Stopping ${runningJobs.jobids.length} running jobs`); + return this.executeJobOperation(runningJobs.jobids, 'stop'); + } + + async stopJob(jobId: string): Promise { + this.logger.log(`Stopping job: ${jobId}`); + + if (isBackupJobGroup(jobId)) { + // This is a group, use the stopgroup endpoint + return this.executeGroupOperation([jobId], 'stopgroup'); + } else { + // This is an individual job ID, use the regular stop endpoint + return 
this.executeJobOperation([jobId], 'stop'); + } + } + + private async executeGroupOperation( + groupNames: string[], + operation: 'stopgroup' + ): Promise { + const stopped: string[] = []; + const errors: string[] = []; + + const promises = groupNames.map(async (groupName) => { + try { + await this.callRcloneApi(`job/${operation}`, { group: groupName }); + stopped.push(groupName); + this.logger.log(`${operation}ped group: ${groupName}`); + } catch (error) { + const errorMsg = `Failed to ${operation} group ${groupName}: ${error}`; + errors.push(errorMsg); + this.logger.error(errorMsg); + } + }); + + await Promise.allSettled(promises); + return { stopped, errors }; + } + + private async executeJobOperation( + jobIds: (string | number)[], + operation: 'stop' + ): Promise { + const stopped: string[] = []; + const errors: string[] = []; + + const promises = jobIds.map(async (jobId) => { + try { + await this.callRcloneApi(`job/${operation}`, { jobid: jobId }); + stopped.push(String(jobId)); + this.logger.log(`${operation}ped job: ${jobId}`); + } catch (error) { + const errorMsg = `Failed to ${operation} job ${jobId}: ${error}`; + errors.push(errorMsg); + this.logger.error(errorMsg); + } + }); + + await Promise.allSettled(promises); + return { stopped, errors }; + } + + async getBackupStatus(): Promise { + const runningJobs = await this.listRunningJobs(); + + if (!runningJobs.jobids?.length) { + return this.statusService.parseBackupStatus(runningJobs, []); + } + + const jobStatuses = await Promise.allSettled( + runningJobs.jobids.map((jobId) => this.getIndividualJobStatus(String(jobId))) + ); + + return this.statusService.parseBackupStatus(runningJobs, jobStatuses); + } + + private async callRcloneApi(endpoint: string, params: Record = {}): Promise { + const url = `${this.rcloneBaseUrl}/${endpoint}`; + + // Unix sockets don't require HTTP authentication - the socket itself provides security + const isUnixSocket = this.rcloneSocketPath && this.rcloneSocketPath.startsWith('/'); + + const requestOptions: any = { + json: params, + responseType: 'json', + enableUnixSockets: true, + }; + + // Only add authentication headers for non-Unix socket connections + if (!isUnixSocket && this.rcloneUsername && this.rclonePassword) { + const authString = `${this.rcloneUsername}:${this.rclonePassword}`; + const authHeader = `Basic ${Buffer.from(authString).toString('base64')}`; + requestOptions.headers = { + Authorization: authHeader, + }; + this.logger.debug( + `Calling RClone API: ${endpoint} with auth header: ${authHeader.substring(0, 20)}...` + ); + } else { + this.logger.debug(`Calling RClone API: ${endpoint} via Unix socket (no auth required)`); + } + + try { + const response = await got.post(url, requestOptions); return response.body; } catch (error: unknown) { this.handleApiError(error, endpoint, params); @@ -378,54 +702,108 @@ export class RCloneApiService implements OnModuleInit, OnModuleDestroy { } private handleApiError(error: unknown, endpoint: string, params: Record): never { + const sanitizedParams = sanitizeParams(params); + if (error instanceof HTTPError) { const statusCode = error.response.statusCode; const rcloneError = this.extractRcloneError(error.response.body, params); - const detailedErrorMessage = `Rclone API Error (${endpoint}, HTTP ${statusCode}): ${rcloneError}`; - - const sanitizedParams = sanitizeParams(params); - this.logger.error( - `Original ${detailedErrorMessage} | Params: ${JSON.stringify(sanitizedParams)}`, - error.stack - ); + const message = `Rclone API Error (${endpoint}, 
HTTP ${statusCode}): ${rcloneError}`; - throw new Error(detailedErrorMessage); - } else if (error instanceof Error) { - const detailedErrorMessage = `Error calling RClone API (${endpoint}) with params ${JSON.stringify(sanitizeParams(params))}: ${error.message}`; - this.logger.error(detailedErrorMessage, error.stack); - throw error; - } else { - const detailedErrorMessage = `Unknown error calling RClone API (${endpoint}) with params ${JSON.stringify(sanitizeParams(params))}: ${String(error)}`; - this.logger.error(detailedErrorMessage); - throw new Error(detailedErrorMessage); + this.logger.error(`${message} | Params: ${JSON.stringify(sanitizedParams)}`, error.stack); + throw new Error(message); } + + const message = + error instanceof Error + ? `Error calling RClone API (${endpoint}): ${error.message}` + : `Unknown error calling RClone API (${endpoint}): ${String(error)}`; + + this.logger.error( + `${message} | Params: ${JSON.stringify(sanitizedParams)}`, + error instanceof Error ? error.stack : undefined + ); + throw error instanceof Error ? error : new Error(message); } private extractRcloneError(responseBody: unknown, fallbackParams: Record): string { try { - let errorBody: unknown; - if (typeof responseBody === 'string') { - errorBody = JSON.parse(responseBody); - } else if (typeof responseBody === 'object' && responseBody !== null) { - errorBody = responseBody; - } + const errorBody = typeof responseBody === 'string' ? JSON.parse(responseBody) : responseBody; if (errorBody && typeof errorBody === 'object' && 'error' in errorBody) { - const typedErrorBody = errorBody as { error: unknown; input?: unknown }; - let rcloneError = `Rclone Error: ${String(typedErrorBody.error)}`; - if (typedErrorBody.input) { - rcloneError += ` | Input: ${JSON.stringify(typedErrorBody.input)}`; - } else if (fallbackParams) { - rcloneError += ` | Original Params: ${JSON.stringify(fallbackParams)}`; + const typedError = errorBody as { error: unknown; input?: unknown }; + let message = `Rclone Error: ${String(typedError.error)}`; + + if (typedError.input) { + message += ` | Input: ${JSON.stringify(typedError.input)}`; + } else { + message += ` | Params: ${JSON.stringify(fallbackParams)}`; } - return rcloneError; - } else if (responseBody) { - return `Non-standard error response body: ${typeof responseBody === 'string' ? responseBody : JSON.stringify(responseBody)}`; - } else { - return 'Empty error response body received.'; + + return message; + } + + return responseBody + ? `Non-standard error response: ${typeof responseBody === 'string' ? responseBody : JSON.stringify(responseBody)}` + : 'Empty error response received'; + } catch { + return `Failed to process error response: ${typeof responseBody === 'string' ? 
responseBody : JSON.stringify(responseBody)}`; + } + } + + private async killExistingRcloneProcesses(): Promise { + try { + this.logger.log('Checking for existing rclone processes...'); + const { stdout } = await execa('pgrep', ['-f', 'rclone.*rcd'], { reject: false }); + + if (!stdout.trim()) { + this.logger.log('No existing rclone processes found'); + return; + } + + const pids = stdout + .trim() + .split('\n') + .filter((pid) => pid.trim()); + this.logger.log(`Found ${pids.length} existing rclone process(es): ${pids.join(', ')}`); + + await this.terminateProcesses(pids); + await this.cleanupStaleSocket(); + } catch (error) { + this.logger.warn(`Error during rclone process cleanup: ${error}`); + } + } + + private async terminateProcesses(pids: string[]): Promise { + for (const pid of pids) { + try { + this.logger.log(`Terminating rclone process PID: ${pid}`); + + await execa('kill', ['-TERM', pid], { reject: false }); + await new Promise((resolve) => + setTimeout(resolve, CONSTANTS.TIMEOUTS.GRACEFUL_SHUTDOWN) + ); + + const { exitCode } = await execa('kill', ['-0', pid], { reject: false }); + + if (exitCode === 0) { + this.logger.warn(`Process ${pid} still running, using SIGKILL`); + await execa('kill', ['-KILL', pid], { reject: false }); + await new Promise((resolve) => + setTimeout(resolve, CONSTANTS.TIMEOUTS.PROCESS_CLEANUP) + ); + } + + this.logger.log(`Successfully terminated process ${pid}`); + } catch (error) { + this.logger.warn(`Failed to kill process ${pid}: ${error}`); } - } catch (parseOrAccessError) { - return `Failed to process error response body. Raw body: ${typeof responseBody === 'string' ? responseBody : JSON.stringify(responseBody)}`; + } + } + + private async cleanupStaleSocket(): Promise { + if (this.rcloneSocketPath && existsSync(this.rcloneSocketPath)) { + await rm(this.rcloneSocketPath, { force: true }); + this.logger.log('Removed stale socket file'); } } } diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone-status.service.test.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone-status.service.test.ts new file mode 100644 index 0000000000..f280f13412 --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone-status.service.test.ts @@ -0,0 +1,505 @@ +import { Test, TestingModule } from '@nestjs/testing'; + +import { beforeEach, describe, expect, it, vi } from 'vitest'; + +import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js'; +import { RCloneStatusService } from '@app/unraid-api/graph/resolvers/rclone/rclone-status.service.js'; +import { RCloneJobStats } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; +import { FormatService } from '@app/unraid-api/utils/format.service.js'; + +// Mock NestJS Logger to suppress logs during tests +vi.mock('@nestjs/common', async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + Logger: vi.fn(() => ({ + log: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + })), + }; +}); + +describe('RCloneStatusService', () => { + let service: RCloneStatusService; + let mockFormatService: FormatService; + + beforeEach(() => { + vi.clearAllMocks(); + + mockFormatService = { + formatBytes: vi.fn().mockImplementation((bytes: number) => `${bytes} B`), + formatSpeed: vi.fn().mockImplementation((bytesPerSecond: number) => `${bytesPerSecond} B/s`), + formatDuration: vi.fn().mockImplementation((seconds: number) => `${seconds}s`), + } as any; + + service = new RCloneStatusService(mockFormatService); + }); 
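For orientation while reading the specs below: the service under test is plain enough to construct directly. A minimal usage sketch, assuming FormatService has a no-argument constructor, as its standalone injection here suggests:

    // Hypothetical direct construction outside Nest's DI container.
    const statusService = new RCloneStatusService(new FormatService());

    const enhanced = statusService.enhanceStatsWithFormattedFields({
        bytes: 1536,
        speed: 512,
        elapsedTime: 30,
        eta: 60,
    });
    // enhanced.formattedBytes / formattedSpeed / formattedElapsedTime /
    // formattedEta now hold the human-readable strings these tests assert on.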
+ + describe('enhanceStatsWithFormattedFields', () => { + it('should add formatted fields for all numeric stats', () => { + const stats: RCloneJobStats = { + bytes: 1024, + speed: 512, + elapsedTime: 60, + eta: 120, + }; + + const result = service.enhanceStatsWithFormattedFields(stats); + + expect(result).toEqual({ + bytes: 1024, + speed: 512, + elapsedTime: 60, + eta: 120, + formattedBytes: '1024 B', + formattedSpeed: '512 B/s', + formattedElapsedTime: '60s', + formattedEta: '120s', + calculatedPercentage: 0, + isActivelyRunning: true, + isCompleted: false, + }); + expect(mockFormatService.formatBytes).toHaveBeenCalledWith(1024); + expect(mockFormatService.formatSpeed).toHaveBeenCalledWith(512); + expect(mockFormatService.formatDuration).toHaveBeenCalledWith(60); + expect(mockFormatService.formatDuration).toHaveBeenCalledWith(120); + }); + + it('should not add formatted fields for undefined values', () => { + const stats: RCloneJobStats = { + bytes: undefined, + speed: undefined, + elapsedTime: undefined, + eta: undefined, + }; + + const result = service.enhanceStatsWithFormattedFields(stats); + + expect(result).toEqual({ + bytes: undefined, + speed: undefined, + elapsedTime: undefined, + eta: undefined, + calculatedPercentage: 0, + formattedElapsedTime: '0s', + formattedEta: 'Unknown', + formattedSpeed: '0 B/s', + isActivelyRunning: false, + isCompleted: false, + }); + expect(mockFormatService.formatBytes).not.toHaveBeenCalled(); + expect(mockFormatService.formatDuration).not.toHaveBeenCalled(); + }); + + it('should not add formatted fields for null values', () => { + const stats: RCloneJobStats = { + bytes: null as any, + speed: null as any, + elapsedTime: null as any, + eta: null as any, + }; + + const result = service.enhanceStatsWithFormattedFields(stats); + + expect(result).toEqual({ + bytes: null, + speed: null, + elapsedTime: null, + eta: null, + calculatedPercentage: 0, + formattedElapsedTime: '0s', + formattedEta: 'Unknown', + formattedSpeed: '0 B/s', + isActivelyRunning: false, + isCompleted: false, + }); + expect(mockFormatService.formatBytes).not.toHaveBeenCalled(); + expect(mockFormatService.formatDuration).not.toHaveBeenCalled(); + }); + + it('should not add formatted speed for zero speed', () => { + const stats: RCloneJobStats = { + speed: 0, + }; + + const result = service.enhanceStatsWithFormattedFields(stats); + + expect(result).toEqual({ + speed: 0, + calculatedPercentage: 0, + formattedElapsedTime: '0s', + formattedEta: 'Unknown', + formattedSpeed: '0 B/s', + isActivelyRunning: false, + isCompleted: false, + }); + expect(mockFormatService.formatSpeed).not.toHaveBeenCalled(); + }); + + it('should not add formatted eta for zero eta', () => { + const stats: RCloneJobStats = { + eta: 0, + }; + + const result = service.enhanceStatsWithFormattedFields(stats); + + expect(result).toEqual({ + eta: 0, + calculatedPercentage: 0, + formattedElapsedTime: '0s', + formattedEta: 'Unknown', + formattedSpeed: '0 B/s', + isActivelyRunning: false, + isCompleted: false, + }); + expect(mockFormatService.formatDuration).not.toHaveBeenCalled(); + }); + }); + + describe('transformStatsToJob', () => { + it('should create RCloneJob with completed status when transfers match total', () => { + const stats: RCloneJobStats = { + group: 'unraid-backup', + fatalError: false, + transfers: 5, + totalTransfers: 5, + errors: 0, + percentage: 100, + }; + + const result = service.transformStatsToJob('123', stats); + + expect(result).toEqual({ + id: '123', + group: 'unraid-backup', + stats, + finished: 
true, + success: true, + error: undefined, + progressPercentage: 100, + status: BackupJobStatus.COMPLETED, + hasRecentJob: true, + }); + }); + + it('should create RCloneJob with running status when transfers incomplete', () => { + const stats: RCloneJobStats = { + group: 'unraid-backup', + fatalError: false, + transfers: 3, + totalTransfers: 5, + errors: 0, + percentage: 60, + }; + + const result = service.transformStatsToJob('123', stats); + + expect(result).toEqual({ + id: '123', + group: 'unraid-backup', + stats, + finished: false, + success: true, + error: undefined, + progressPercentage: 60, + status: BackupJobStatus.RUNNING, + hasRecentJob: true, + }); + }); + + it('should create RCloneJob with error status when lastError exists', () => { + const stats: RCloneJobStats = { + group: 'unraid-backup', + fatalError: false, + transfers: 0, + totalTransfers: 5, + errors: 1, + percentage: 0, + lastError: 'Connection timeout', + }; + + const result = service.transformStatsToJob('123', stats); + + expect(result).toEqual({ + id: '123', + group: 'unraid-backup', + stats, + finished: false, + success: false, + error: 'Connection timeout', + progressPercentage: 0, + status: BackupJobStatus.FAILED, + hasRecentJob: true, + }); + }); + + it('should create RCloneJob with cancelled status when lastError is context canceled', () => { + const stats: RCloneJobStats = { + group: 'unraid-backup', + fatalError: false, + transfers: 0, + totalTransfers: 5, + errors: 1, + percentage: 0, + lastError: 'context canceled', + }; + + const result = service.transformStatsToJob('123', stats); + + expect(result).toEqual({ + id: '123', + group: 'unraid-backup', + stats, + finished: false, + success: false, + error: 'context canceled', + progressPercentage: 0, + status: BackupJobStatus.CANCELLED, + hasRecentJob: true, + }); + }); + + it('should handle numeric job ID', () => { + const stats: RCloneJobStats = { + fatalError: false, + transfers: 0, + totalTransfers: 0, + }; + + const result = service.transformStatsToJob(456, stats); + + expect(result.id).toBe('456'); + }); + + it('should handle missing group', () => { + const stats: RCloneJobStats = { + fatalError: false, + transfers: 0, + totalTransfers: 0, + }; + + const result = service.transformStatsToJob('123', stats); + + expect(result.group).toBeUndefined(); + }); + }); + + describe('calculateCombinedStats', () => { + it('should combine stats from multiple jobs', () => { + const mockActiveJobs = [ + { + stats: { + bytes: 1024, + checks: 2, + transfers: 3, + totalBytes: 2048, + totalChecks: 4, + totalTransfers: 6, + speed: 100, + eta: 120, + }, + }, + { + stats: { + bytes: 512, + checks: 1, + transfers: 2, + totalBytes: 1024, + totalChecks: 2, + totalTransfers: 4, + speed: 200, + eta: 60, + }, + }, + ]; + + const result = service.calculateCombinedStats(mockActiveJobs); + + expect(result).toEqual({ + bytes: 1536, + checks: 3, + transfers: 5, + totalBytes: 3072, + totalChecks: 6, + totalTransfers: 10, + speed: 200, // Max speed + eta: 120, // Max eta + }); + }); + + it('should return null for empty jobs array', () => { + const result = service.calculateCombinedStats([]); + expect(result).toBeNull(); + }); + + it('should return null when no valid stats', () => { + const mockActiveJobs = [{ stats: null as any }, { stats: undefined as any }]; + const result = service.calculateCombinedStats(mockActiveJobs); + expect(result).toBeNull(); + }); + }); + + describe('parseActiveJobs', () => { + it('should return active jobs that are not finished', () => { + const mockJobStatuses = 
[ + { status: 'fulfilled', value: { id: '1', finished: false } }, + { status: 'fulfilled', value: { id: '2', finished: true } }, + { status: 'rejected', reason: 'Error' }, + ] as PromiseSettledResult[]; + + const result = service.parseActiveJobs(mockJobStatuses); + + expect(result).toEqual([{ id: '1', finished: false }]); + }); + + it('should return empty array when all jobs are finished', () => { + const mockJobStatuses = [ + { status: 'fulfilled', value: { id: '1', finished: true } }, + ] as PromiseSettledResult[]; + + const result = service.parseActiveJobs(mockJobStatuses); + + expect(result).toEqual([]); + }); + }); + + describe('parseBackupStatus', () => { + it('should return running status when active jobs exist', () => { + const mockRunningJobs = { jobids: ['123', '456'] }; + const mockJobStatuses = [ + { status: 'fulfilled', value: { id: '123', finished: false, stats: { bytes: 1024 } } }, + { status: 'fulfilled', value: { id: '456', finished: false, stats: { bytes: 512 } } }, + ] as PromiseSettledResult[]; + + const result = service.parseBackupStatus(mockRunningJobs, mockJobStatuses); + + expect(result).toEqual({ + isRunning: true, + stats: expect.objectContaining({ bytes: 1536 }), + jobCount: 2, + activeJobs: expect.arrayContaining([ + expect.objectContaining({ id: '123', finished: false }), + expect.objectContaining({ id: '456', finished: false }), + ]), + }); + }); + + it('should return not running when no job IDs', () => { + const mockRunningJobs = { jobids: [] }; + const mockJobStatuses = [] as PromiseSettledResult[]; + + const result = service.parseBackupStatus(mockRunningJobs, mockJobStatuses); + + expect(result).toEqual({ + isRunning: false, + stats: null, + jobCount: 0, + activeJobs: [], + }); + }); + }); + + describe('parseJobWithStats', () => { + it('should parse job with enhanced stats', () => { + const mockJobStatus = { + stats: { bytes: 1024, speed: 512 }, + }; + + const result = service.parseJobWithStats('123', mockJobStatus); + + expect(result).toEqual( + expect.objectContaining({ + id: '123', + stats: expect.objectContaining({ + bytes: 1024, + speed: 512, + formattedBytes: '1024 B', + formattedSpeed: '512 B/s', + }), + }) + ); + }); + + it('should handle missing stats', () => { + const mockJobStatus = {}; + + const result = service.parseJobWithStats('123', mockJobStatus); + + expect(result.id).toBe('123'); + expect(result.stats).toEqual({}); + }); + }); + + describe('parseAllJobsWithStats', () => { + it('should return jobs when job IDs exist', () => { + const mockRunningJobs = { jobids: ['123', '456'] }; + const mockJobs = [ + { id: '123', group: 'unraid-backup' }, + { id: '456', group: 'unraid-backup' }, + ] as any[]; + + const result = service.parseAllJobsWithStats(mockRunningJobs, mockJobs); + + expect(result).toEqual(mockJobs); + }); + + it('should return empty array when no job IDs', () => { + const mockRunningJobs = { jobids: [] }; + const mockJobs = [] as any[]; + + const result = service.parseAllJobsWithStats(mockRunningJobs, mockJobs); + + expect(result).toEqual([]); + }); + }); + + describe('parseJobsWithStats', () => { + it('should parse fulfilled job statuses', () => { + const mockJobStatuses = [ + { status: 'fulfilled', value: { id: '123', stats: { bytes: 1024 } } }, + { status: 'fulfilled', value: { id: '456', stats: { bytes: 512 } } }, + { status: 'rejected', reason: 'Error' }, + ] as PromiseSettledResult[]; + + const result = service.parseJobsWithStats(mockJobStatuses); + + expect(result).toHaveLength(2); + expect(result[0]).toEqual( + 
expect.objectContaining({ + id: '123', + stats: expect.objectContaining({ bytes: 1024, formattedBytes: '1024 B' }), + }) + ); + expect(result[1]).toEqual( + expect.objectContaining({ + id: '456', + stats: expect.objectContaining({ bytes: 512, formattedBytes: '512 B' }), + }) + ); + }); + + it('should handle rejected statuses gracefully', () => { + const mockJobStatuses = [ + { status: 'rejected', reason: 'Error' }, + ] as PromiseSettledResult[]; + + const result = service.parseJobsWithStats(mockJobStatuses); + + expect(result).toEqual([]); + }); + }); + + describe('getBackupStatus', () => { + it('should return default backup status', () => { + const result = service.getBackupStatus(); + + expect(result).toEqual({ + isRunning: false, + stats: null, + jobCount: 0, + }); + }); + }); +}); diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone-status.service.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone-status.service.ts new file mode 100644 index 0000000000..836322576f --- /dev/null +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone-status.service.ts @@ -0,0 +1,268 @@ +import { Injectable, Logger } from '@nestjs/common'; + +import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js'; +import { + RCloneJob, + RCloneJobListResponse, + RCloneJobStats, + RCloneJobWithStats, +} from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; +import { FormatService } from '@app/unraid-api/utils/format.service.js'; + +// Internal interface for job status response from RClone API +interface RCloneJobStatusResponse { + id?: string | number; + group?: string; + stats?: RCloneJobStats; + finished?: boolean; + error?: string; + [key: string]: any; +} + +interface BackupStatusResult { + isRunning: boolean; + stats: RCloneJobStats | null; + jobCount: number; + activeJobs: RCloneJobStatusResponse[]; +} + +@Injectable() +export class RCloneStatusService { + private readonly logger = new Logger(RCloneStatusService.name); + + constructor(private readonly formatService: FormatService) {} + + enhanceStatsWithFormattedFields(stats: RCloneJobStats): RCloneJobStats { + const enhancedStats = { ...stats }; + + const isFinished = + stats.fatalError === false && + stats.transfers === (stats.totalTransfers || 0) && + (stats.totalTransfers || 0) > 0; + + // Format bytes + if (stats.bytes !== undefined && stats.bytes !== null) { + enhancedStats.formattedBytes = this.formatService.formatBytes(stats.bytes); + } + + // Handle speed formatting and reset for finished jobs + if (isFinished && stats.speed !== undefined && stats.speed !== null) { + enhancedStats.speed = 0; + } + + if (stats.speed !== undefined && stats.speed !== null && stats.speed > 0) { + enhancedStats.formattedSpeed = this.formatService.formatSpeed(stats.speed); + } else { + enhancedStats.formattedSpeed = '0 B/s'; + } + + // Format elapsed time + if (stats.elapsedTime !== undefined && stats.elapsedTime !== null) { + enhancedStats.formattedElapsedTime = this.formatService.formatDuration(stats.elapsedTime); + } else { + enhancedStats.formattedElapsedTime = '0s'; + } + + // Format ETA + if (stats.eta !== undefined && stats.eta !== null && stats.eta > 0) { + enhancedStats.formattedEta = this.formatService.formatDuration(stats.eta); + } else { + enhancedStats.formattedEta = 'Unknown'; + } + + // Calculate percentage fallback (what frontend currently does) + let calculatedPercentage = stats.percentage; + if (calculatedPercentage === null || calculatedPercentage === undefined) { + if (stats.bytes 
&& stats.totalBytes && stats.totalBytes > 0) { + calculatedPercentage = Math.round((stats.bytes / stats.totalBytes) * 100); + } + } + + // For completed jobs, ensure percentage is 100 + if (isFinished && calculatedPercentage !== null && calculatedPercentage !== undefined) { + calculatedPercentage = 100; + } + + enhancedStats.calculatedPercentage = Math.round(calculatedPercentage || 0); + + // Determine if actively running (what frontend currently calculates) + const isActivelyTransferring = + stats.transferring && Array.isArray(stats.transferring) && stats.transferring.length > 0; + const isActivelyChecking = + stats.checking && Array.isArray(stats.checking) && stats.checking.length > 0; + const hasActiveSpeed = (stats.speed || 0) > 0; + const isNotFinished = !isFinished && stats.fatalError !== true; + + enhancedStats.isActivelyRunning = + (isActivelyTransferring || isActivelyChecking || hasActiveSpeed) && isNotFinished; + enhancedStats.isCompleted = isFinished; + + return enhancedStats; + } + + transformStatsToJob(jobId: string | number, stats: RCloneJobStats): RCloneJob { + this.logger.debug(`Stats for job ${jobId}: %o`, stats); + const group = stats.group || undefined; + + this.logger.debug(`Processing job ${jobId}: group="${group}"`); + + const isFinished = + stats.fatalError === false && + stats.transfers === (stats.totalTransfers || 0) && + (stats.totalTransfers || 0) > 0; + + const hasError = Boolean(stats.lastError); + const isCancelled = stats.lastError === 'context canceled'; + + // Determine status + let status: BackupJobStatus; + + if (hasError) { + if (isCancelled) { + status = BackupJobStatus.CANCELLED; + } else { + status = BackupJobStatus.FAILED; + } + } else if (isFinished || stats.calculatedPercentage === 100) { + status = BackupJobStatus.COMPLETED; + } else { + status = BackupJobStatus.RUNNING; + } + + return { + id: String(jobId), + group: group, + stats, + finished: isFinished, + success: stats.fatalError === false && (stats.errors || 0) === 0, + error: stats.lastError || undefined, + progressPercentage: stats.calculatedPercentage || stats.percentage, + status, + hasRecentJob: true, // If we have a job object, there's a recent job + }; + } + + calculateCombinedStats(activeJobs: RCloneJobStatusResponse[]): RCloneJobStats | null { + if (activeJobs.length === 0) return null; + + const validStats = activeJobs + .map((job) => job.stats) + .filter((stats): stats is RCloneJobStats => Boolean(stats)); + + if (validStats.length === 0) return null; + + return validStats.reduce( + (combined, stats) => ({ + bytes: (combined.bytes || 0) + (stats.bytes || 0), + checks: (combined.checks || 0) + (stats.checks || 0), + transfers: (combined.transfers || 0) + (stats.transfers || 0), + totalBytes: (combined.totalBytes || 0) + (stats.totalBytes || 0), + totalChecks: (combined.totalChecks || 0) + (stats.totalChecks || 0), + totalTransfers: (combined.totalTransfers || 0) + (stats.totalTransfers || 0), + speed: Math.max(combined.speed || 0, stats.speed || 0), + eta: Math.max(combined.eta || 0, stats.eta || 0), + }), + {} as RCloneJobStats + ); + } + + parseActiveJobs( + jobStatuses: PromiseSettledResult[] + ): RCloneJobStatusResponse[] { + const activeJobs: RCloneJobStatusResponse[] = []; + + this.logger.debug(`Job statuses: ${JSON.stringify(jobStatuses)}`); + + jobStatuses.forEach((result, index) => { + if (result.status === 'fulfilled' && !result.value.finished) { + activeJobs.push(result.value); + } else if (result.status === 'rejected') { + this.logger.warn(`Failed to get status for job 
${index}: ${result.reason}`); + } + }); + + return activeJobs; + } + + parseBackupStatus( + runningJobs: RCloneJobListResponse, + jobStatuses: PromiseSettledResult[] + ): BackupStatusResult { + if (!runningJobs.jobids?.length) { + return { + isRunning: false, + stats: null, + jobCount: 0, + activeJobs: [], + }; + } + + const activeJobs = this.parseActiveJobs(jobStatuses); + const combinedStats = this.calculateCombinedStats(activeJobs); + + return { + isRunning: activeJobs.length > 0, + stats: combinedStats, + jobCount: activeJobs.length, + activeJobs, + }; + } + + parseJobWithStats(jobId: string, jobStatus: RCloneJobStatusResponse): RCloneJob { + const stats = jobStatus.stats ? this.enhanceStatsWithFormattedFields(jobStatus.stats) : {}; + return this.transformStatsToJob(jobId, stats); + } + + parseAllJobsWithStats(runningJobs: RCloneJobListResponse, jobs: RCloneJob[]): RCloneJob[] { + if (!runningJobs.jobids?.length) { + this.logger.log('No active jobs found in RClone'); + return []; + } + + this.logger.log( + `Found ${runningJobs.jobids.length} active jobs in RClone: [${runningJobs.jobids.join(', ')}]` + ); + + return jobs; + } + + parseJobsWithStats(jobStatuses: PromiseSettledResult[]): RCloneJob[] { + const allJobs: RCloneJob[] = []; + + jobStatuses.forEach((result, index) => { + if (result.status === 'fulfilled') { + const jobStatus = result.value; + const stats = jobStatus.stats + ? this.enhanceStatsWithFormattedFields(jobStatus.stats) + : {}; + const job = this.transformStatsToJob(jobStatus.id || index, stats); + allJobs.push(job); + } else { + this.logger.error(`Failed to get status for job ${index}: ${result.reason}`); + } + }); + + return allJobs; + } + + getBackupStatus(): { + isRunning: boolean; + stats: RCloneJobStats | null; + jobCount: number; + } { + try { + return { + isRunning: false, + stats: null, + jobCount: 0, + }; + } catch (error) { + this.logger.debug(`Error getting backup status: ${error}`); + return { + isRunning: false, + stats: null, + jobCount: 0, + }; + } + } +} diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone.model.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone.model.ts index 97cc7d04f2..f4497a03cf 100644 --- a/api/src/unraid-api/graph/resolvers/rclone/rclone.model.ts +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone.model.ts @@ -1,9 +1,11 @@ import { Field, ID, InputType, ObjectType } from '@nestjs/graphql'; import { type Layout } from '@jsonforms/core'; +import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js'; import { IsBoolean, IsObject, IsOptional, IsString } from 'class-validator'; import { GraphQLJSON } from 'graphql-scalars'; +import { BackupJobStatus } from '@app/unraid-api/graph/resolvers/backup/orchestration/backup-job-status.model.js'; import { DataSlice } from '@app/unraid-api/types/json-forms.js'; @ObjectType() @@ -147,6 +149,19 @@ export class RCloneStartBackupInput { @IsString() dstPath!: string; + @Field(() => Boolean, { nullable: true, defaultValue: false }) + @IsOptional() + @IsBoolean() + async?: boolean; + + @Field(() => String, { + nullable: true, + description: 'Configuration ID for job grouping and identification', + }) + @IsOptional() + @IsString() + configId?: string; + @Field(() => GraphQLJSON, { nullable: true }) @IsOptional() @IsObject() @@ -206,3 +221,189 @@ export class GetRCloneJobStatusDto { @IsString() jobId!: string; } + +@ObjectType() +export class RCloneJobStats { + @Field(() => Number, { description: 'Bytes transferred', nullable: true }) + bytes?: number; + + @Field(() => Number, { 
description: 'Transfer speed in bytes/sec', nullable: true }) + speed?: number; + + @Field(() => Number, { description: 'Estimated time to completion in seconds', nullable: true }) + eta?: number; + + @Field(() => Number, { description: 'Elapsed time in seconds', nullable: true }) + elapsedTime?: number; + + @Field(() => Number, { description: 'Progress percentage (0-100)', nullable: true }) + percentage?: number; + + @Field(() => Number, { description: 'Number of checks completed', nullable: true }) + checks?: number; + + @Field(() => Number, { description: 'Number of deletes completed', nullable: true }) + deletes?: number; + + @Field(() => Number, { description: 'Number of errors encountered', nullable: true }) + errors?: number; + + @Field(() => Boolean, { description: 'Whether a fatal error occurred', nullable: true }) + fatalError?: boolean; + + @Field(() => String, { description: 'Last error message', nullable: true }) + lastError?: string; + + @Field(() => Number, { description: 'Number of renames completed', nullable: true }) + renames?: number; + + @Field(() => Boolean, { description: 'Whether there is a retry error', nullable: true }) + retryError?: boolean; + + @Field(() => Number, { description: 'Number of server-side copies', nullable: true }) + serverSideCopies?: number; + + @Field(() => Number, { description: 'Bytes in server-side copies', nullable: true }) + serverSideCopyBytes?: number; + + @Field(() => Number, { description: 'Number of server-side moves', nullable: true }) + serverSideMoves?: number; + + @Field(() => Number, { description: 'Bytes in server-side moves', nullable: true }) + serverSideMoveBytes?: number; + + @Field(() => Number, { description: 'Total bytes to transfer', nullable: true }) + totalBytes?: number; + + @Field(() => Number, { description: 'Total checks to perform', nullable: true }) + totalChecks?: number; + + @Field(() => Number, { description: 'Total transfers to perform', nullable: true }) + totalTransfers?: number; + + @Field(() => Number, { description: 'Time spent transferring in seconds', nullable: true }) + transferTime?: number; + + @Field(() => Number, { description: 'Number of transfers completed', nullable: true }) + transfers?: number; + + @Field(() => GraphQLJSON, { description: 'Currently transferring files', nullable: true }) + transferring?: any[]; + + @Field(() => GraphQLJSON, { description: 'Currently checking files', nullable: true }) + checking?: any[]; + + // Formatted fields + @Field(() => String, { description: 'Human-readable bytes transferred', nullable: true }) + formattedBytes?: string; + + @Field(() => String, { description: 'Human-readable transfer speed', nullable: true }) + formattedSpeed?: string; + + @Field(() => String, { description: 'Human-readable elapsed time', nullable: true }) + formattedElapsedTime?: string; + + @Field(() => String, { description: 'Human-readable ETA', nullable: true }) + formattedEta?: string; + + // Computed fields that frontend currently calculates + @Field(() => Number, { + description: 'Calculated percentage (fallback when percentage is null)', + nullable: true, + }) + calculatedPercentage?: number; + + @Field(() => Boolean, { description: 'Whether the job is actively running', nullable: true }) + isActivelyRunning?: boolean; + + @Field(() => Boolean, { description: 'Whether the job is completed', nullable: true }) + isCompleted?: boolean; + + // Allow additional fields + [key: string]: any; +} + +@ObjectType() +export class RCloneJob { + @Field(() => PrefixedID, { description: 'Job 
ID' }) + id!: string; + + @Field(() => String, { description: 'RClone group for the job', nullable: true }) + group?: string; + + @Field(() => RCloneJobStats, { description: 'Job status and statistics', nullable: true }) + stats?: RCloneJobStats; + + @Field(() => Number, { description: 'Progress percentage (0-100)', nullable: true }) + progressPercentage?: number; + + @Field(() => PrefixedID, { description: 'Configuration ID that triggered this job', nullable: true }) + configId?: string; + + @Field(() => BackupJobStatus, { description: 'Current status of the job', nullable: true }) + status?: BackupJobStatus; + + @Field(() => Boolean, { description: 'Whether the job is finished', nullable: true }) + finished?: boolean; + + @Field(() => Boolean, { description: 'Whether the job was successful', nullable: true }) + success?: boolean; + + @Field(() => String, { description: 'Error message if job failed', nullable: true }) + error?: string; + + // Computed fields that frontend currently calculates + @Field(() => Boolean, { description: 'Whether the job is actively running', nullable: true }) + isRunning?: boolean; + + @Field(() => String, { description: 'Error message for display', nullable: true }) + errorMessage?: string; + + @Field(() => Boolean, { description: 'Whether there is a recent job', nullable: true }) + hasRecentJob?: boolean; +} + +@ObjectType() +export class RCloneJobStatusDto { + @Field(() => Number, { description: 'Job ID' }) + id!: number; + + @Field(() => String, { description: 'RClone group for the job' }) + group!: string; + + @Field(() => Boolean, { description: 'Whether the job is finished' }) + finished!: boolean; + + @Field(() => Boolean, { description: 'Whether the job was successful' }) + success!: boolean; + + @Field(() => String, { description: 'Error message if any' }) + error!: string; + + @Field(() => Number, { description: 'Job duration in seconds' }) + duration!: number; + + @Field(() => String, { description: 'Job start time in ISO format' }) + startTime!: string; + + @Field(() => String, { description: 'Job end time in ISO format' }) + endTime!: string; + + @Field(() => GraphQLJSON, { description: 'Job output data', nullable: true }) + output?: Record; +} + +// API Response Types (for internal use) +export interface RCloneJobListResponse { + jobids: (string | number)[]; +} + +export interface RCloneJobWithStats { + jobId: string | number; + stats: RCloneJobStats; +} + +export interface RCloneJobsWithStatsResponse { + jobids: (string | number)[]; + stats: RCloneJobStats[]; +} diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone.module.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone.module.ts index ff4619f113..df3c1820f4 100644 --- a/api/src/unraid-api/graph/resolvers/rclone/rclone.module.ts +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone.module.ts @@ -1,20 +1,24 @@ -import { Module } from '@nestjs/common'; +import { forwardRef, Module } from '@nestjs/common'; +import { BackupSourceModule } from '@app/unraid-api/graph/resolvers/backup/source/backup-source.module.js'; import { RCloneApiService } from '@app/unraid-api/graph/resolvers/rclone/rclone-api.service.js'; import { RCloneFormService } from '@app/unraid-api/graph/resolvers/rclone/rclone-form.service.js'; +import { RCloneStatusService } from '@app/unraid-api/graph/resolvers/rclone/rclone-status.service.js'; import { RCloneMutationsResolver } from '@app/unraid-api/graph/resolvers/rclone/rclone.mutation.resolver.js'; import { RCloneBackupSettingsResolver } from 
'@app/unraid-api/graph/resolvers/rclone/rclone.resolver.js'; import { RCloneService } from '@app/unraid-api/graph/resolvers/rclone/rclone.service.js'; +import { UtilsModule } from '@app/unraid-api/utils/utils.module.js'; @Module({ - imports: [], + imports: [UtilsModule, forwardRef(() => BackupSourceModule)], providers: [ RCloneService, RCloneApiService, + RCloneStatusService, RCloneFormService, RCloneBackupSettingsResolver, RCloneMutationsResolver, ], - exports: [RCloneService, RCloneApiService], + exports: [RCloneService, RCloneApiService, RCloneStatusService], }) export class RCloneModule {} diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone.mutation.resolver.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone.mutation.resolver.ts index b62f049565..64bc5530f4 100644 --- a/api/src/unraid-api/graph/resolvers/rclone/rclone.mutation.resolver.ts +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone.mutation.resolver.ts @@ -1,7 +1,7 @@ import { Logger } from '@nestjs/common'; import { Args, ResolveField, Resolver } from '@nestjs/graphql'; -import { Resource } from '@unraid/shared/graphql.model.js'; +import { Resource } from '@unraid/shared/graphql.model'; import { AuthActionVerb, AuthPossession, @@ -14,6 +14,7 @@ import { CreateRCloneRemoteInput, DeleteRCloneRemoteInput, RCloneRemote, + RCloneRemoteConfig, } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; /** @@ -38,7 +39,7 @@ export class RCloneMutationsResolver { name: input.name, type: input.type, parameters: {}, - config, + config: config as RCloneRemoteConfig, }; } catch (error) { this.logger.error(`Error creating remote: ${error}`); diff --git a/api/src/unraid-api/graph/resolvers/rclone/rclone.service.ts b/api/src/unraid-api/graph/resolvers/rclone/rclone.service.ts index 7ea6481e72..8e27436728 100644 --- a/api/src/unraid-api/graph/resolvers/rclone/rclone.service.ts +++ b/api/src/unraid-api/graph/resolvers/rclone/rclone.service.ts @@ -5,13 +5,13 @@ import { type Layout } from '@jsonforms/core'; import type { SettingSlice } from '@app/unraid-api/types/json-forms.js'; import { RCloneApiService } from '@app/unraid-api/graph/resolvers/rclone/rclone-api.service.js'; import { RCloneFormService } from '@app/unraid-api/graph/resolvers/rclone/rclone-form.service.js'; -import { RCloneRemote } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; +import { RCloneJob, RCloneRemote } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js'; /** * Types for rclone backup configuration UI */ export interface RcloneBackupConfigValues { - configStep: number; + configStep: { current: number; total: number }; showAdvanced: boolean; name?: string; type?: string; @@ -48,7 +48,7 @@ export class RCloneService { */ async onModuleInit(): Promise<void> { try { - if (!this.rcloneApiService.initialized) { + if (!this.rcloneApiService.isInitialized) { this.logger.warn( 'RClone API service is not initialized, skipping provider info loading' ); @@ -83,7 +83,7 @@ export class RCloneService { */ async getCurrentSettings(): Promise<RcloneBackupConfigValues> { return { - configStep: 0, + configStep: { current: 0, total: 0 }, showAdvanced: false, }; } @@ -125,4 +125,11 @@ export class RCloneService { return []; } } + + /** + * Gets enhanced job status with computed fields + */ + async getEnhancedJobStatus(jobId: string, configId?: string): Promise<RCloneJob> { + return this.rcloneApiService.getEnhancedJobStatus(jobId, configId); + } } diff --git a/api/src/unraid-api/graph/resolvers/resolvers.module.ts b/api/src/unraid-api/graph/resolvers/resolvers.module.ts index
9ef7b9ca78..8d817f0b62 100644 --- a/api/src/unraid-api/graph/resolvers/resolvers.module.ts +++ b/api/src/unraid-api/graph/resolvers/resolvers.module.ts @@ -2,15 +2,14 @@ import { Module } from '@nestjs/common'; import { AuthModule } from '@app/unraid-api/auth/auth.module.js'; import { ApiKeyModule } from '@app/unraid-api/graph/resolvers/api-key/api-key.module.js'; -import { ApiKeyResolver } from '@app/unraid-api/graph/resolvers/api-key/api-key.resolver.js'; import { ArrayModule } from '@app/unraid-api/graph/resolvers/array/array.module.js'; +import { BackupModule } from '@app/unraid-api/graph/resolvers/backup/backup.module.js'; import { ConfigResolver } from '@app/unraid-api/graph/resolvers/config/config.resolver.js'; import { CustomizationModule } from '@app/unraid-api/graph/resolvers/customization/customization.module.js'; import { DisksModule } from '@app/unraid-api/graph/resolvers/disks/disks.module.js'; import { DisplayResolver } from '@app/unraid-api/graph/resolvers/display/display.resolver.js'; import { DisplayService } from '@app/unraid-api/graph/resolvers/display/display.service.js'; import { DockerModule } from '@app/unraid-api/graph/resolvers/docker/docker.module.js'; -import { FlashBackupModule } from '@app/unraid-api/graph/resolvers/flash-backup/flash-backup.module.js'; import { FlashResolver } from '@app/unraid-api/graph/resolvers/flash/flash.resolver.js'; import { DevicesResolver } from '@app/unraid-api/graph/resolvers/info/devices.resolver.js'; import { DevicesService } from '@app/unraid-api/graph/resolvers/info/devices.service.js'; @@ -34,16 +33,18 @@ import { VmsService } from '@app/unraid-api/graph/resolvers/vms/vms.service.js'; import { ServicesResolver } from '@app/unraid-api/graph/services/services.resolver.js'; import { SharesResolver } from '@app/unraid-api/graph/shares/shares.resolver.js'; import { MeResolver } from '@app/unraid-api/graph/user/user.resolver.js'; +import { UtilsModule } from '@app/unraid-api/utils/utils.module.js'; @Module({ imports: [ + UtilsModule, ArrayModule, ApiKeyModule, AuthModule, + BackupModule, CustomizationModule, DockerModule, DisksModule, - FlashBackupModule, RCloneModule, SettingsModule, ], diff --git a/api/src/unraid-api/graph/utils/utils.module.ts b/api/src/unraid-api/graph/utils/utils.module.ts new file mode 100644 index 0000000000..6eef856e69 --- /dev/null +++ b/api/src/unraid-api/graph/utils/utils.module.ts @@ -0,0 +1,10 @@ +import { Global, Module } from '@nestjs/common'; + +import { FormatService } from '@app/unraid-api/utils/format.service.js'; + +@Global() +@Module({ + providers: [FormatService], + exports: [FormatService], +}) +export class UtilsModule {} diff --git a/api/src/unraid-api/main.ts b/api/src/unraid-api/main.ts index 3fe81ccb8d..1f82ff4c4d 100644 --- a/api/src/unraid-api/main.ts +++ b/api/src/unraid-api/main.ts @@ -18,7 +18,8 @@ export async function bootstrapNestServer(): Promise<NestFastifyApplication> { const app = await NestFactory.create(AppModule, new FastifyAdapter(), { bufferLogs: false, - ...(LOG_LEVEL !== 'TRACE' ? { logger: false } : {}), + + ...(LOG_LEVEL !== 'DEBUG' ?
{ logger: false } : {}), }); // Enable validation globally diff --git a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/.login.php.last-download-time b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/.login.php.last-download-time index 83193e3506..0bafec0c4f 100644 --- a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/.login.php.last-download-time +++ b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/.login.php.last-download-time @@ -1 +1 @@ -1751630630443 \ No newline at end of file +1752326314433 \ No newline at end of file diff --git a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/DefaultPageLayout.php.last-download-time b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/DefaultPageLayout.php.last-download-time index 125d1dfd84..e1e928daee 100644 --- a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/DefaultPageLayout.php.last-download-time +++ b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/DefaultPageLayout.php.last-download-time @@ -1 +1 @@ -1751630630198 \ No newline at end of file +1752326314052 \ No newline at end of file diff --git a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/Notifications.page.last-download-time b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/Notifications.page.last-download-time index c7db09181c..ba3f2f983f 100644 --- a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/Notifications.page.last-download-time +++ b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/Notifications.page.last-download-time @@ -1 +1 @@ -1751630630343 \ No newline at end of file +1752326314199 \ No newline at end of file diff --git a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/auth-request.php.last-download-time b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/auth-request.php.last-download-time index c2f1dd9f49..169a741ea7 100644 --- a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/auth-request.php.last-download-time +++ b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/auth-request.php.last-download-time @@ -1 +1 @@ -1751630630571 \ No newline at end of file +1752326314557 \ No newline at end of file diff --git a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/rc.nginx.last-download-time b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/rc.nginx.last-download-time index 0e90294497..bb1ae4ba8a 100644 --- a/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/rc.nginx.last-download-time +++ b/api/src/unraid-api/unraid-file-modifier/modifications/__test__/__fixtures__/downloaded/rc.nginx.last-download-time @@ -1 +1 @@ -1751630630810 \ No newline at end of file +1752326314785 \ No newline at end of file diff --git a/api/src/unraid-api/utils/format.service.test.ts b/api/src/unraid-api/utils/format.service.test.ts new file mode 100644 index 0000000000..a2c24b9c4f --- /dev/null +++ b/api/src/unraid-api/utils/format.service.test.ts @@ -0,0 +1,59 @@ +import { 
describe, expect, it } from 'vitest'; + +import { FormatService } from '@app/unraid-api/utils/format.service.js'; + +describe('FormatService', () => { + const service = new FormatService(); + + describe('formatBytes', () => { + it('should format zero bytes', () => { + expect(service.formatBytes(0)).toBe('0 B'); + }); + + it('should format bytes to best unit', () => { + expect(service.formatBytes(1024)).toBe('1.02 KB'); + expect(service.formatBytes(1048576)).toBe('1.05 MB'); + expect(service.formatBytes(1073741824)).toBe('1.07 GB'); + }); + + it('should format with decimals when needed', () => { + expect(service.formatBytes(1536)).toBe('1.54 KB'); + expect(service.formatBytes(9636529)).toBe('9.64 MB'); + }); + }); + + describe('formatSpeed', () => { + it('should format zero speed', () => { + expect(service.formatSpeed(0)).toBe('0 B/s'); + }); + + it('should format speed with /s suffix', () => { + expect(service.formatSpeed(1024)).toBe('1.02 KB/s'); + expect(service.formatSpeed(1048576)).toBe('1.05 MB/s'); + expect(service.formatSpeed(1073741824)).toBe('1.07 GB/s'); + }); + + it('should format with decimals when needed', () => { + expect(service.formatSpeed(1536)).toBe('1.54 KB/s'); + expect(service.formatSpeed(9636529.183648435)).toBe('9.64 MB/s'); + }); + }); + + describe('formatDuration', () => { + it('should format small durations in seconds', () => { + expect(service.formatDuration(30)).toBe('30s'); + expect(service.formatDuration(45.5)).toBe('45.5s'); + }); + + it('should format longer durations to best unit', () => { + expect(service.formatDuration(60)).toBe('60 s'); + expect(service.formatDuration(3600)).toBe('60 min'); + expect(service.formatDuration(86400)).toBe('24 h'); + }); + + it('should format with decimals when needed', () => { + expect(service.formatDuration(90)).toBe('1.5 min'); + expect(service.formatDuration(11.615060290966666 * 60)).toBe('11.62 min'); + }); + }); +}); diff --git a/api/src/unraid-api/utils/format.service.ts b/api/src/unraid-api/utils/format.service.ts new file mode 100644 index 0000000000..2758de36de --- /dev/null +++ b/api/src/unraid-api/utils/format.service.ts @@ -0,0 +1,33 @@ +import { Injectable } from '@nestjs/common'; + +import { convert } from 'convert'; + +@Injectable() +export class FormatService { + formatBytes(bytes: number): string { + if (bytes === 0) return '0 B'; + + const result = convert(bytes, 'bytes').to('best'); + const value = + typeof result.quantity === 'number' ? Number(result.quantity.toFixed(2)) : result.quantity; + return `${value} ${result.unit}`; + } + + formatSpeed(bytesPerSecond: number): string { + if (bytesPerSecond === 0) return '0 B/s'; + + const result = convert(bytesPerSecond, 'bytes').to('best'); + const value = + typeof result.quantity === 'number' ? Number(result.quantity.toFixed(2)) : result.quantity; + return `${value} ${result.unit}/s`; + } + + formatDuration(seconds: number): string { + if (seconds < 60) return `${Math.round(seconds * 100) / 100}s`; + + const result = convert(seconds, 'seconds').to('best'); + const value = + typeof result.quantity === 'number' ? 
Number(result.quantity.toFixed(2)) : result.quantity; + return `${value} ${result.unit}`; + } +} diff --git a/api/src/unraid-api/utils/utils.module.ts b/api/src/unraid-api/utils/utils.module.ts new file mode 100644 index 0000000000..6eef856e69 --- /dev/null +++ b/api/src/unraid-api/utils/utils.module.ts @@ -0,0 +1,10 @@ +import { Global, Module } from '@nestjs/common'; + +import { FormatService } from '@app/unraid-api/utils/format.service.js'; + +@Global() +@Module({ + providers: [FormatService], + exports: [FormatService], +}) +export class UtilsModule {} diff --git a/package.json b/package.json index 34fc592571..3b9a2753b9 100644 --- a/package.json +++ b/package.json @@ -7,7 +7,7 @@ "build:watch": " pnpm -r --parallel build:watch", "dev": "pnpm -r dev", "unraid:deploy": "pnpm -r unraid:deploy", - "test": "pnpm -r test", + "test": "vitest", "lint": "pnpm -r lint", "lint:fix": "pnpm -r lint:fix", "type-check": "pnpm -r type-check", @@ -43,7 +43,8 @@ "@manypkg/cli": "0.24.0", "chalk": "5.4.1", "diff": "8.0.2", - "ignore": "7.0.5" + "ignore": "7.0.5", + "vitest": "3.2.4" }, "devDependencies": { "lint-staged": "16.1.2", @@ -54,7 +55,7 @@ }, "lint-staged": { "*.{js,jsx,ts,tsx,vue}": [ - "pnpm lint:fix" + "npx pnpm lint:fix" ] }, "packageManager": "pnpm@10.13.1" diff --git a/packages/unraid-api-plugin-connect/src/unraid-connect/connect-settings.resolver.ts b/packages/unraid-api-plugin-connect/src/unraid-connect/connect-settings.resolver.ts index 2f64a3845a..7ee5784ea0 100644 --- a/packages/unraid-api-plugin-connect/src/unraid-connect/connect-settings.resolver.ts +++ b/packages/unraid-api-plugin-connect/src/unraid-connect/connect-settings.resolver.ts @@ -3,11 +3,11 @@ import { EventEmitter2 } from '@nestjs/event-emitter'; import { Args, Mutation, Query, ResolveField, Resolver } from '@nestjs/graphql'; import { type Layout } from '@jsonforms/core'; +import { GraphQLJSON } from 'graphql-scalars'; import { Resource } from '@unraid/shared/graphql.model.js'; import { DataSlice } from '@unraid/shared/jsonforms/settings.js'; import { PrefixedID } from '@unraid/shared/prefixed-id-scalar.js'; import { UsePermissions } from '@unraid/shared/use-permissions.directive.js'; -import { GraphQLJSON } from 'graphql-scalars'; import { AuthActionVerb, AuthPossession } from 'nest-authz'; import { EVENTS } from '../helper/nest-tokens.js'; diff --git a/packages/unraid-shared/src/graphql.model.ts b/packages/unraid-shared/src/graphql.model.ts index 7c4554d164..fdc8ca7753 100644 --- a/packages/unraid-shared/src/graphql.model.ts +++ b/packages/unraid-shared/src/graphql.model.ts @@ -10,6 +10,7 @@ export enum Resource { ACTIVATION_CODE = 'ACTIVATION_CODE', API_KEY = 'API_KEY', ARRAY = 'ARRAY', + BACKUP = 'BACKUP', CLOUD = 'CLOUD', CONFIG = 'CONFIG', CONNECT = 'CONNECT', diff --git a/plugin/builder/build-txz.ts b/plugin/builder/build-txz.ts index 524676e8af..9f58bd05cc 100644 --- a/plugin/builder/build-txz.ts +++ b/plugin/builder/build-txz.ts @@ -10,6 +10,7 @@ import { cleanupTxzFiles } from "./utils/cleanup"; import { apiDir } from "./utils/paths"; import { getVendorBundleName, getVendorFullPath } from "./build-vendor-store"; import { getAssetUrl } from "./utils/bucket-urls"; +import { ensureRclone } from "./utils/rclone-helper"; // Recursively search for manifest files diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 297220747c..667c54eea8 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -20,6 +20,9 @@ importers: ignore: specifier: 7.0.5 version: 7.0.5 + vitest: + specifier: 3.2.4 + version: 
3.2.4(@types/node@22.16.3)(@vitest/ui@3.2.4)(happy-dom@18.0.1)(jiti@2.4.2)(jsdom@26.1.0)(stylus@0.57.0)(terser@5.43.1)(tsx@4.20.3)(yaml@2.8.0) devDependencies: lint-staged: specifier: 16.1.2 @@ -157,8 +160,8 @@ importers: specifier: 1.0.2 version: 1.0.2 cron: - specifier: 4.3.1 - version: 4.3.1 + specifier: 4.3.0 + version: 4.3.0 cross-fetch: specifier: 4.1.0 version: 4.1.0 @@ -817,6 +820,9 @@ importers: '@vueuse/core': specifier: 13.5.0 version: 13.5.0(vue@3.5.17(typescript@5.8.3)) + ajv: + specifier: ^8.17.1 + version: 8.17.1 class-variance-authority: specifier: 0.7.1 version: 0.7.1 @@ -848,6 +854,9 @@ importers: specifier: 1.3.2 version: 1.3.2 devDependencies: + '@eslint/js': + specifier: 9.30.1 + version: 9.30.1 '@ianvs/prettier-plugin-sort-imports': specifier: 4.5.1 version: 4.5.1(@vue/compiler-sfc@3.5.17)(prettier@3.6.2) @@ -980,6 +989,9 @@ importers: vue: specifier: 3.5.17 version: 3.5.17(typescript@5.8.3) + vue-eslint-parser: + specifier: ^10.2.0 + version: 10.2.0(eslint@9.30.1(jiti@2.4.2)) vue-tsc: specifier: 3.0.1 version: 3.0.1(typescript@5.8.3) @@ -6509,10 +6521,6 @@ packages: resolution: {integrity: sha512-ciiYNLfSlF9MrDqnbMdRWFiA6oizSF7kA1osPP9lRzNu0Uu+AWog1UKy7SkckiDY2irrNjeO6qLyKnXC8oxmrw==} engines: {node: '>=18.x'} - cron@4.3.1: - resolution: {integrity: sha512-7x7DoEOxV11t3OPWWMjj1xrL1PGkTV5RV+/54IJTZD7gStiaMploY43EkeBSkDZTLRbUwk+OISbQ0TR133oXyA==} - engines: {node: '>=18.x'} - croner@4.1.97: resolution: {integrity: sha512-/f6gpQuxDaqXu+1kwQYSckUglPaOrHdbIlBAu0YuW8/Cdb45XwXYNUBXg3r/9Mo6n540Kn/smKcZWko5x99KrQ==} @@ -12984,12 +12992,6 @@ packages: peerDependencies: vue: '>=2' - vue-eslint-parser@10.1.3: - resolution: {integrity: sha512-dbCBnd2e02dYWsXoqX5yKUZlOt+ExIpq7hmHKPb5ZqKcjf++Eo0hMseFTZMLKThrUk61m+Uv6A2YSBve6ZvuDQ==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - peerDependencies: - eslint: ^8.57.0 || ^9.0.0 - vue-eslint-parser@10.2.0: resolution: {integrity: sha512-CydUvFOQKD928UzZhTp4pr2vWz1L+H99t7Pkln2QSPdvmURT0MoC4wUccfCnuEaihNsu9aYYyk+bep8rlfkUXw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -19434,11 +19436,6 @@ snapshots: '@types/luxon': 3.6.2 luxon: 3.6.1 - cron@4.3.1: - dependencies: - '@types/luxon': 3.6.2 - luxon: 3.6.1 - croner@4.1.97: {} croner@9.1.0: {} @@ -20502,7 +20499,7 @@ snapshots: eslint-plugin-import-x@4.15.2(@typescript-eslint/utils@8.36.0(eslint@9.30.1(jiti@2.4.2))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint@9.30.1(jiti@2.4.2)): dependencies: - '@typescript-eslint/types': 8.34.1 + '@typescript-eslint/types': 8.36.0 comment-parser: 1.4.1 debug: 4.4.1(supports-color@5.5.0) eslint: 9.30.1(jiti@2.4.2) @@ -26861,19 +26858,6 @@ snapshots: vue: 3.5.17(typescript@5.8.3) vue-inbrowser-compiler-independent-utils: 4.71.1(vue@3.5.17(typescript@5.8.3)) - vue-eslint-parser@10.1.3(eslint@9.30.1(jiti@2.4.2)): - dependencies: - debug: 4.4.1(supports-color@5.5.0) - eslint: 9.30.1(jiti@2.4.2) - eslint-scope: 8.4.0 - eslint-visitor-keys: 4.2.1 - espree: 10.4.0 - esquery: 1.6.0 - lodash: 4.17.21 - semver: 7.7.2 - transitivePeerDependencies: - - supports-color - vue-eslint-parser@10.2.0(eslint@9.30.1(jiti@2.4.2)): dependencies: debug: 4.4.1(supports-color@5.5.0) @@ -26918,7 +26902,7 @@ snapshots: postcss-styl: 0.12.3 recast-x: 1.0.5 table: 6.9.0 - vue-eslint-parser: 10.1.3(eslint@9.30.1(jiti@2.4.2)) + vue-eslint-parser: 10.2.0(eslint@9.30.1(jiti@2.4.2)) transitivePeerDependencies: - eslint - supports-color diff --git a/unraid-ui/eslint.config.ts b/unraid-ui/eslint.config.ts index 72b3e56d7b..0d167249e5 100644 --- 
a/unraid-ui/eslint.config.ts +++ b/unraid-ui/eslint.config.ts @@ -1,15 +1,14 @@ // For more info, see https://github.com/storybookjs/eslint-plugin-storybook#configuration-flat-config-format -import storybook from "eslint-plugin-storybook"; import eslint from '@eslint/js'; -// @ts-expect-error No Declaration For This Plugin import importPlugin from 'eslint-plugin-import'; import noRelativeImportPaths from 'eslint-plugin-no-relative-import-paths'; import prettier from 'eslint-plugin-prettier'; import vuePlugin from 'eslint-plugin-vue'; import tseslint from 'typescript-eslint'; -// Import vue-eslint-parser as an ESM import import vueEslintParser from 'vue-eslint-parser'; +import storybook from 'eslint-plugin-storybook'; +// Import vue-eslint-parser as an ESM import // Common rules shared across file types const commonRules = { diff --git a/unraid-ui/package.json b/unraid-ui/package.json index 6291b8749f..c763b27de8 100644 --- a/unraid-ui/package.json +++ b/unraid-ui/package.json @@ -55,6 +55,7 @@ "@jsonforms/vue": "3.6.0", "@jsonforms/vue-vanilla": "3.6.0", "@vueuse/core": "13.5.0", + "ajv": "^8.17.1", "class-variance-authority": "0.7.1", "clsx": "2.1.1", "dompurify": "3.2.6", @@ -67,6 +68,7 @@ "vue-sonner": "1.3.2" }, "devDependencies": { + "@eslint/js": "9.30.1", "@ianvs/prettier-plugin-sort-imports": "4.5.1", "@storybook/addon-docs": "9.0.16", "@storybook/addon-links": "9.0.16", @@ -111,6 +113,7 @@ "vite-plugin-vue-devtools": "7.7.7", "vitest": "3.2.4", "vue": "3.5.17", + "vue-eslint-parser": "^10.2.0", "vue-tsc": "3.0.1", "wrangler": "^3.114.10" }, diff --git a/unraid-ui/src/components.ts b/unraid-ui/src/components.ts index 182d8c1f0c..32e8432480 100644 --- a/unraid-ui/src/components.ts +++ b/unraid-ui/src/components.ts @@ -6,8 +6,8 @@ export * from '@/components/common/dropdown-menu'; export * from '@/components/common/loading'; export * from '@/components/form/input'; export * from '@/components/form/label'; -export * from '@/components/form/number'; export * from '@/components/form/lightswitch'; +export * from '@/components/form/number'; export * from '@/components/form/select'; export * from '@/components/form/switch'; export * from '@/components/common/scroll-area'; diff --git a/unraid-ui/src/components/form/lightswitch/Lightswitch.vue b/unraid-ui/src/components/form/lightswitch/Lightswitch.vue index 2d1e61ecbc..513fdbb8ed 100644 --- a/unraid-ui/src/components/form/lightswitch/Lightswitch.vue +++ b/unraid-ui/src/components/form/lightswitch/Lightswitch.vue @@ -1,7 +1,4 @@ + + diff --git a/unraid-ui/src/forms/LabelRenderer.vue b/unraid-ui/src/forms/LabelRenderer.vue index be6b3009a8..afc893fd0f 100644 --- a/unraid-ui/src/forms/LabelRenderer.vue +++ b/unraid-ui/src/forms/LabelRenderer.vue @@ -5,14 +5,14 @@ import { type UISchemaElement } from '@jsonforms/core'; import { rendererProps, useJsonFormsRenderer } from '@jsonforms/vue'; import { computed, ref, watchEffect } from 'vue'; -// Define a type for our specific Label UI Schema -interface LabelUISchema extends UISchemaElement { +// Define a type for our specific Label UI Schema using intersection type +type LabelUISchema = UISchemaElement & { text?: string; options?: { description?: string; format?: 'title' | 'heading' | 'documentation' | string; // Add other formats as needed }; -} +}; const props = defineProps(rendererProps()); diff --git a/unraid-ui/src/forms/Select.vue b/unraid-ui/src/forms/Select.vue index 666757a14b..435cd19dcd 100644 --- a/unraid-ui/src/forms/Select.vue +++ b/unraid-ui/src/forms/Select.vue @@ -59,7 +59,6 @@ 
const onChange = (value: unknown) => { - {{ option.label }} diff --git a/unraid-ui/src/forms/SteppedLayout.vue b/unraid-ui/src/forms/SteppedLayout.vue index 147cd4326e..72aa065f48 100644 --- a/unraid-ui/src/forms/SteppedLayout.vue +++ b/unraid-ui/src/forms/SteppedLayout.vue @@ -13,10 +13,9 @@ import { type JsonFormsSubStates, type JsonSchema, type Layout, - type UISchemaElement, } from '@jsonforms/core'; import { DispatchRenderer, useJsonFormsLayout, type RendererProps } from '@jsonforms/vue'; -import { computed, inject, ref, type Ref } from 'vue'; +import { computed, inject, nextTick, onMounted, ref, type Ref } from 'vue'; // Define props based on RendererProps const props = defineProps<RendererProps<Layout>>(); @@ -48,15 +47,30 @@ const numSteps = computed(() => stepsConfig.value.length); // --- Current Step Logic --- Use injected core.data const currentStep = computed(() => { const stepData = core!.data?.configStep; - // Handle both the new object format and the old number format + + // Return current step if properly initialized if (typeof stepData === 'object' && stepData !== null && typeof stepData.current === 'number') { - // Ensure step is within bounds return Math.max(0, Math.min(stepData.current, numSteps.value - 1)); } - // Fallback for initial state or old number format - const numericStep = typeof stepData === 'number' ? stepData : 0; - return Math.max(0, Math.min(numericStep, numSteps.value - 1)); + + // Return 0 as default if not initialized yet + return 0; +}); + +// Initialize configStep on mount +onMounted(async () => { + // Wait for next tick to ensure form data is available + await nextTick(); + + const stepData = core!.data?.configStep; + + // Only initialize if configStep doesn't exist or is in wrong format + if (!stepData || typeof stepData !== 'object' || typeof stepData.current !== 'number') { + const initialStep = { current: 0, total: numSteps.value }; + dispatch(Actions.update('configStep', () => initialStep)); + } }); + const isLastStep = computed(() => numSteps.value > 0 && currentStep.value === numSteps.value - 1); // --- Step Update Logic --- @@ -71,15 +85,30 @@ const updateStep = (newStep: number) => { dispatch(Actions.update('configStep', () => ({ current: newStep, total }))); }; +// --- Type guard for elements with step options --- +interface ElementWithStep { + options?: { + step?: number; + [key: string]: unknown; + }; +} + +function hasStepOption(element: unknown): element is ElementWithStep { + return ( + element != null && + typeof element === 'object' && + 'options' in element && + typeof element.options === 'object' && + element.options !== null && + typeof (element.options as { step?: number }).step === 'number' + ); +} + // --- Filtered Elements for Current Step --- const currentStepElements = computed(() => { - const filtered = (props.uischema.elements || []).filter((element: UISchemaElement) => { - // Check if the element has an 'options' object and an 'step' property - return ( - typeof element.options === 'object' && - element.options !== null && - element.options.step === currentStep.value - ); + const elements = props.uischema.elements || []; + const filtered = elements.filter((element) => { + return hasStepOption(element) && element.options!.step === currentStep.value; }); return filtered; }); @@ -142,15 +171,12 @@ const getStepState = (stepIndex: number): StepState => { /> - -
control.value.uischema?.options?.placeholder
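
For context on how the pieces above fit together, here is a minimal sketch of the kind of enrichment step that could populate the formatted* fields declared on RCloneJobStats using the new FormatService. The FormatService methods and the formatted field names come from this diff; the JobStatsFormatter class itself and the numeric `bytes` field (declared on RCloneJobStats before this excerpt) are assumptions for illustration, not the PR's actual wiring, which presumably lives in the RCloneStatusService added here but not shown.

import { Injectable } from '@nestjs/common';

import { RCloneJobStats } from '@app/unraid-api/graph/resolvers/rclone/rclone.model.js';
import { FormatService } from '@app/unraid-api/utils/format.service.js';

// Hypothetical helper, not part of this PR: copies raw rclone stats and fills
// in the human-readable fields via FormatService. Assumes RCloneJobStats has a
// numeric `bytes` field for bytes transferred.
@Injectable()
export class JobStatsFormatter {
    constructor(private readonly format: FormatService) {}

    withFormattedFields(stats: RCloneJobStats): RCloneJobStats {
        const result: RCloneJobStats = { ...stats };
        if (typeof stats.bytes === 'number') {
            result.formattedBytes = this.format.formatBytes(stats.bytes);
        }
        if (typeof stats.speed === 'number') {
            result.formattedSpeed = this.format.formatSpeed(stats.speed);
        }
        if (typeof stats.elapsedTime === 'number') {
            result.formattedElapsedTime = this.format.formatDuration(stats.elapsedTime);
        }
        if (typeof stats.eta === 'number') {
            result.formattedEta = this.format.formatDuration(stats.eta);
        }
        return result;
    }
}

With the values exercised in format.service.test.ts, a stats object with speed 9636529.18 would come back with formattedSpeed '9.64 MB/s', matching the expectations in that suite.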
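
Similarly, the step-filtering change in SteppedLayout.vue can be exercised in isolation. The hasStepOption guard below is copied from the diff; the sample uischema elements and the console check are made up for illustration.

// Standalone sketch of the SteppedLayout step filter. Only options.step matters;
// the element shapes are illustrative stand-ins for JSONForms uischema nodes.
interface ElementWithStep {
    options?: {
        step?: number;
        [key: string]: unknown;
    };
}

function hasStepOption(element: unknown): element is ElementWithStep {
    return (
        element != null &&
        typeof element === 'object' &&
        'options' in element &&
        typeof element.options === 'object' &&
        element.options !== null &&
        typeof (element.options as { step?: number }).step === 'number'
    );
}

const elements: unknown[] = [
    { type: 'Control', scope: '#/properties/name', options: { step: 0 } },
    { type: 'Control', scope: '#/properties/type', options: { step: 1 } },
    { type: 'Label', text: 'No step option, so never shown by the stepper' },
];

const currentStep = 0;
// Keeps only the first element: the guard rejects the label, and the second
// control belongs to step 1.
const visible = elements.filter((el) => hasStepOption(el) && el.options!.step === currentStep);
console.log(visible.length); // 1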