
Commit

Merge remote-tracking branch 'origin/main' into 9169-large-entry-error
fungairino committed Sep 20, 2024
2 parents 16e339b + 839fa1d commit 1ff8a80
Showing 2 changed files with 51 additions and 25 deletions.
6 changes: 3 additions & 3 deletions src/auth/featureFlags.ts
@@ -101,12 +101,12 @@ export const FeatureFlags = {
   SETTINGS_EXPERIMENTAL: "settings-experimental",
 
   /**
-   * PixieBrix error service kill switch/
+   * PixieBrix error service off switch/
    */
   ERROR_SERVICE_DISABLE_REPORT: "error-service-disable-report",
 
   /**
-   * Datadog error telemetry kill switch.
+   * Datadog error telemetry off switch.
    *
    * Originally introduced when moving Datadog to the offscreen document in order to turn off error telemetry if
    * the offscreen document implementation was buggy.
@@ -115,7 +115,7 @@ export const FeatureFlags = {
     "application-error-telemetry-disable-report",
 
   /**
-   * IndexDB logging kill switch. This disables writing to the LOG database, along with
+   * IndexDB logging off switch. This disables writing to the LOG database, along with
    * the clear debug logging and sweep logs functionality.
    *
    * Introduced to mitigate issues around idb logging causing runtime performance issues. See:
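
For context, a minimal sketch of how an off switch like these could gate reporting. The flagOn and reportToErrorService helpers and the "@/auth/featureFlags" import path are illustrative assumptions, not necessarily the repository's actual APIs:

import { FeatureFlags } from "@/auth/featureFlags";

// Hypothetical helpers for illustration only.
declare function flagOn(flag: string): Promise<boolean>;
declare function reportToErrorService(error: Error): Promise<void>;

export async function maybeReportError(error: Error): Promise<void> {
  // If the off switch flag is enabled, skip reporting entirely.
  if (await flagOn(FeatureFlags.ERROR_SERVICE_DISABLE_REPORT)) {
    return;
  }

  await reportToErrorService(error);
}
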
70 changes: 48 additions & 22 deletions src/telemetry/logging.ts
@@ -176,6 +176,10 @@ async function openLoggingDB() {
     },
     blocked(currentVersion: number, blockedVersion: number) {
       console.debug("Database blocked.", { currentVersion, blockedVersion });
+      // This should never happen, since we immediately close connections if blocking below,
+      // but just in case, close the connection here so it doesn't block openLoggingDB from
+      // resolving
+      database?.close();
     },
     blocking(currentVersion: number, blockedVersion: number) {
       // Don't block closing/upgrading the database
@@ -272,7 +276,9 @@ export async function clearLogs(): Promise<void> {
  */
 export async function clearLog(context: MessageContext = {}): Promise<void> {
   await withLoggingDB(async (db) => {
-    const tx = db.transaction(ENTRY_OBJECT_STORE, "readwrite");
+    const tx = db.transaction(ENTRY_OBJECT_STORE, "readwrite", {
+      durability: "relaxed",
+    });
 
     if (isEmpty(context)) {
       await tx.store.clear();
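
The durability option is part of the standard IDBDatabase.transaction(storeNames, mode, options) API, and the idb wrapper passes it through; "relaxed" lets the browser report success before the write is fully flushed to disk, a reasonable trade-off for log data. A standalone sketch with placeholder database and store names:

import { openDB } from "idb";

async function writeLogEntry(entry: { level: string; message: string }): Promise<void> {
  // Placeholder database/store names for illustration.
  const db = await openDB("EXAMPLE_LOG_DB", 1, {
    upgrade(db) {
      db.createObjectStore("entries", { autoIncrement: true });
    },
  });

  // "relaxed" durability: the browser may acknowledge the write before it reaches
  // stable storage, trading durability for lower latency.
  const tx = db.transaction("entries", "readwrite", { durability: "relaxed" });
  await tx.store.add(entry);
  await tx.done;
}
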
@@ -297,7 +303,9 @@ export async function getLogEntries(
 ): Promise<LogEntry[]> {
   return withLoggingDB(async (db) => {
     const objectStore = db
-      .transaction(ENTRY_OBJECT_STORE, "readonly")
+      .transaction(ENTRY_OBJECT_STORE, "readonly", {
+        durability: "relaxed",
+      })
       .objectStore(ENTRY_OBJECT_STORE);
 
     let indexKey: IndexKey | undefined;
@@ -530,7 +538,9 @@ export async function clearModComponentDebugLogs(
   }
 
   await withLoggingDB(async (db) => {
-    const tx = db.transaction(ENTRY_OBJECT_STORE, "readwrite");
+    const tx = db.transaction(ENTRY_OBJECT_STORE, "readwrite", {
+      durability: "relaxed",
+    });
     const index = tx.store.index("modComponentId");
     for await (const cursor of index.iterate(modComponentId)) {
       if (cursor.value.level === "debug" || cursor.value.level === "trace") {
@@ -548,33 +558,49 @@ async function _sweepLogs(): Promise<void> {
     return;
   }
 
-  await withLoggingDB(async (db) => {
-    const numRecords = await db.count(ENTRY_OBJECT_STORE);
-
-    if (numRecords > MAX_LOG_RECORDS) {
-      const numToDelete = numRecords - MAX_LOG_RECORDS * LOG_STORAGE_RATIO;
-
-      console.debug("Sweeping logs", {
-        numRecords,
-        numToDelete,
-      });
-
-      const tx = db.transaction(ENTRY_OBJECT_STORE, "readwrite");
-
-      let deletedCount = 0;
-
-      // Ideally this would be ordered by timestamp to delete the oldest records, but timestamp is not an index.
-      // This might mostly "just work" if the cursor happens to iterate in insertion order
-      for await (const cursor of tx.store) {
-        await cursor.delete();
-        deletedCount++;
-
-        if (deletedCount > numToDelete) {
-          return;
-        }
-      }
-    }
-  }, _sweepLogs.name);
+  const abortController = new AbortController();
+  // Ensure in cases where the sweep is taking too long, we abort the operation to reduce the likelihood
+  // of blocking other db transactions.
+  const timeoutId = setTimeout(abortController.abort, 30_000);
+
+  try {
+    await withLoggingDB(async (db) => {
+      const numRecords = await db.count(ENTRY_OBJECT_STORE);
+
+      if (numRecords > MAX_LOG_RECORDS) {
+        const numToDelete = numRecords - MAX_LOG_RECORDS * LOG_STORAGE_RATIO;
+
+        console.debug("Sweeping logs", {
+          numRecords,
+          numToDelete,
+        });
+
+        const tx = db.transaction(ENTRY_OBJECT_STORE, "readwrite", {
+          durability: "relaxed",
+        });
+
+        let deletedCount = 0;
+
+        // Ideally this would be ordered by timestamp to delete the oldest records, but timestamp is not an index.
+        // This might mostly "just work" if the cursor happens to iterate in insertion order
+        for await (const cursor of tx.store) {
+          if (abortController.signal.aborted) {
+            console.warn("Log sweep aborted due to timeout");
+            break;
+          }
+
+          await cursor.delete();
+          deletedCount++;
+
+          if (deletedCount > numToDelete) {
+            return;
+          }
+        }
+      }
+    }, _sweepLogs.name);
+  } finally {
+    clearTimeout(timeoutId);
+  }
 }
 
 /**
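
The sweep now bounds its own runtime with an AbortController armed by a timer. A self-contained sketch of that general pattern (the callback name, batch semantics, and 30-second budget are illustrative placeholders, not the module's actual API):

// Bound a long-running cleanup loop with a timer-driven AbortController.
async function sweepWithTimeout(
  deleteNextBatch: () => Promise<boolean>, // returns false when nothing is left to delete
  timeoutMs = 30_000,
): Promise<void> {
  const controller = new AbortController();
  // Arrow function keeps abort() bound to its controller.
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);

  try {
    while (await deleteNextBatch()) {
      if (controller.signal.aborted) {
        console.warn("Sweep aborted due to timeout");
        break;
      }
    }
  } finally {
    clearTimeout(timeoutId);
  }
}
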
