mirror of
https://github.com/docmost/docmost.git
synced 2026-05-14 12:44:16 +08:00
fix(base): enable DuckDB disk spill + raise memory default to avoid OOM on large bases
This commit is contained in:
@@ -65,6 +65,9 @@ class FakeEnvService {
|
||||
getBaseQueryCacheThreads() {
  // Cap DuckDB at two worker threads in the test fake: deterministic and cheap on CI.
  const threadCount = 2;
  return threadCount;
}
|
||||
getBaseQueryCacheTempDirectory() {
  // Per-suite scratch directory under the OS temp root for DuckDB spill files.
  const os = require('node:os');
  return `${os.tmpdir()}/docmost-duckdb-test`;
}
|
||||
getRedisUrl() {
  // Connection string for the test Redis instance (module-level constant in this file).
  const url = REDIS_URL;
  return url;
}
|
||||
@@ -453,6 +456,9 @@ describeIntegration('BaseQueryCacheService LRU eviction', () => {
|
||||
getBaseQueryCacheThreads() {
  // Fixed small thread count keeps the eviction tests reproducible and light.
  const threadCount = 2;
  return threadCount;
}
|
||||
getBaseQueryCacheTempDirectory() {
  // DuckDB spill location for tests; lives under the OS temp root.
  const os = require('node:os');
  return `${os.tmpdir()}/docmost-duckdb-test`;
}
|
||||
getRedisUrl() {
  // Hand back the harness-wide Redis URL (declared elsewhere in this file).
  const url = REDIS_URL;
  return url;
}
|
||||
@@ -1101,6 +1107,9 @@ describeIntegration('BaseQueryCacheService warm-up on boot', () => {
|
||||
getBaseQueryCacheThreads() {
  // Two threads is plenty for the warm-up-on-boot scenario and avoids CI contention.
  const threadCount = 2;
  return threadCount;
}
|
||||
getBaseQueryCacheTempDirectory() {
  // Same spill directory as the other suites: <os tmp>/docmost-duckdb-test.
  const os = require('node:os');
  return `${os.tmpdir()}/docmost-duckdb-test`;
}
|
||||
getRedisUrl() {
  // Redis endpoint used by the integration harness; sourced from a file-level constant.
  const url = REDIS_URL;
  return url;
}
|
||||
|
||||
@@ -36,16 +36,34 @@ export class CollectionLoader {
|
||||
const properties = await this.basePropertyRepo.findByBaseId(baseId);
|
||||
const specs = buildColumnSpecs(properties);
|
||||
|
||||
const { memoryLimit, threads } = this.config.config;
|
||||
const { memoryLimit, threads, tempDirectory } = this.config.config;
|
||||
|
||||
// Ensure the temp directory exists so DuckDB can spill to it.
|
||||
// Swallow errors — if creation fails, DuckDB will fail its own sanity
|
||||
// check and we'll log that instead of crashing here.
|
||||
try {
|
||||
const fs = require('node:fs');
|
||||
fs.mkdirSync(tempDirectory, { recursive: true });
|
||||
} catch {
|
||||
/* swallow */
|
||||
}
|
||||
|
||||
const instance = await DuckDBInstance.create(':memory:', {
|
||||
memory_limit: memoryLimit,
|
||||
threads: String(threads),
|
||||
temp_directory: tempDirectory,
|
||||
});
|
||||
const connection = await instance.connect();
|
||||
|
||||
try {
|
||||
await this.pgExtension.configureOnConnection(connection);
|
||||
|
||||
// Disable insertion-order preservation during bulk load — DuckDB's docs
|
||||
// explicitly recommend this for memory-pressure on large inserts. Our
|
||||
// loader doesn't depend on the insertion order (we sort via indexes
|
||||
// or keyset cursors later), so this is free memory savings.
|
||||
await connection.run('SET preserve_insertion_order = false');
|
||||
|
||||
// Bulk load via CREATE TABLE AS SELECT. JSONB extraction happens
|
||||
// server-side via the base_cell_* helpers; DuckDB streams typed
|
||||
// columns over COPY BINARY into its vectorized insert path.
|
||||
|
||||
@@ -9,6 +9,7 @@ export type QueryCacheConfig = {
|
||||
memoryLimit: string;
|
||||
threads: number;
|
||||
trace: boolean;
|
||||
tempDirectory: string;
|
||||
};
|
||||
|
||||
@Injectable()
|
||||
@@ -23,6 +24,7 @@ export class QueryCacheConfigProvider {
|
||||
memoryLimit: env.getBaseQueryCacheMemoryLimit(),
|
||||
threads: env.getBaseQueryCacheThreads(),
|
||||
trace: env.getBaseQueryCacheTrace(),
|
||||
tempDirectory: env.getBaseQueryCacheTempDirectory(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user