2 changes: 1 addition & 1 deletion app/common/ConfigAPI.ts
@@ -26,7 +26,7 @@ export class ConfigAPI extends BaseAPI {
}

public async healthcheck(): Promise<void> {
const resp = await this.request(`${this._homeUrl}/status?ready=1`);
const resp = await this.request(`${this._homeUrl}/status?allInstancesReady=1`);
if (!resp.ok) {
throw new Error(await resp.text());
}
2 changes: 1 addition & 1 deletion app/server/MergedServer.ts
@@ -206,7 +206,7 @@ export class MergedServer {
await this.flexServer.finalizePlugins(this.hasComponent("home") ? checkUserContentPort() : null);
this.flexServer.checkOptionCombinations();
this.flexServer.summary();
this.flexServer.setReady(true);
this.flexServer.ready = true;

if (this._options.extraWorkers) {
if (!process.env.REDIS_URL) {
14 changes: 13 additions & 1 deletion app/server/lib/FlexServer.ts
@@ -101,6 +101,7 @@ import {AddressInfo} from 'net';
import fetch from 'node-fetch';
import * as path from 'path';
import * as serveStatic from 'serve-static';
import { HealthChecker } from './HealthChecker';

// Health checks are a little noisy in the logs, so we don't show them all.
// We show the first N health checks:
@@ -213,6 +214,7 @@ export class FlexServer implements GristServer {
private _emitNotifier = new EmitNotifier();
private _testPendingNotifications: number = 0;
private _latestVersionAvailable?: LatestVersionAvailable;
private _healthChecker: HealthChecker;

constructor(public port: number, public name: string = 'flexServer',
public readonly options: FlexServerOptions = {}) {
@@ -274,6 +276,8 @@ export class FlexServer implements GristServer {
this.setLatestVersionAvailable(latestVersionAvailable);
});

this._healthChecker = new HealthChecker(this);

// The electron build is not supported at this time, but this stub
// implementation of electronServerMethods is present to allow kicking
// its tires.
@@ -600,6 +604,9 @@ export class FlexServer implements GristServer {
if (isParameterOn(req.query.ready)) {
checks.set('ready', this._isReady);
}
if (isParameterOn(req.query.allInstancesReady)) {
checks.set('allInstancesReady', this._healthChecker.allServersOkay(timeout, true));
}
let extra = '';
let ok = true;
// If we had any extra check, collect their status to report them.
@@ -1059,6 +1066,7 @@ export class FlexServer implements GristServer {
if (this.httpsServer) { this.httpsServer.close(); }
if (this.housekeeper) { await this.housekeeper.stop(); }
if (this._jobs) { await this._jobs.stop(); }
await this._healthChecker.close();
await this._shutdown();
if (this._accessTokens) { await this._accessTokens.close(); }
// Do this after _shutdown, since DocWorkerMap is used during shutdown.
@@ -1890,7 +1898,7 @@
}
}

public setReady(value: boolean) {
public set ready(value: boolean) {
if(value) {
log.debug('FlexServer is ready');
} else {
@@ -1899,6 +1907,10 @@
this._isReady = value;
}

public get ready() {
return this._isReady;
}

public checkOptionCombinations() {
// Check for some bad combinations we should warn about.
const allowedWebhookDomains = appSettings.section('integrations').flag('allowedWebhookDomains').readString({
4 changes: 2 additions & 2 deletions app/server/lib/GristServer.ts
@@ -52,6 +52,7 @@ export interface StorageCoordinator {
export interface GristServer extends StorageCoordinator {
readonly create: ICreate;
readonly testPending: boolean;
ready: boolean;
settings?: IGristCoreConfig;
getHost(): string;
getHomeUrl(req: express.Request, relPath?: string): string;
@@ -103,7 +104,6 @@ export interface GristServer extends StorageCoordinator {
isRestrictedMode(): boolean;
onUserChange(callback: (change: UserChange) => Promise<void>): void;
onStreamingDestinationsChange(callback: (orgId?: number) => Promise<void>): void;
setReady(value: boolean): void;
}

export interface GristLoginSystem {
@@ -163,6 +163,7 @@ export function createDummyGristServer(): GristServer {
return {
create,
testPending: false,
ready: true,
settings: loadGristCoreConfig(),
getHost() { return 'localhost:4242'; },
getHomeUrl() { return 'http://localhost:4242'; },
@@ -214,7 +215,6 @@ export function createDummyGristServer(): GristServer {
onUserChange() { /* do nothing */ },
onStreamingDestinationsChange() { /* do nothing */ },
hardDeleteDoc() { return Promise.resolve(); },
setReady() { /* do nothing */ },
};
}

140 changes: 140 additions & 0 deletions app/server/lib/HealthChecker.ts
@@ -0,0 +1,140 @@
import {GristServer} from 'app/server/lib/GristServer';
Member:

Could you add a doc-comment to this file that describes how these work, and why it's done this way? In particular, I assume it's a deliberate choice to rely on redis rather than, say, call each server's /status endpoint -- is that because we don't want to assume that servers can communicate with each other directly? That also means that this check doesn't reflect the ability for the servers to communicate with each other, right?

jordigh (Contributor Author), Jul 29, 2025:

Sure, I'll write something.

How would servers know what other servers exist without Redis?

And since we're already using Redis to communicate between doc workers, it seemed like a natural extension to communicate with all Grist instances.

Does that make sense?

Member:

Well... this isn't entirely convincing. But I might not fully understand the goal of this check. Is it about checking whether a multi-server setup works correctly, or is it more narrow than that, like how many home-servers or doc-workers are running?

> How would servers know what other servers exist without Redis?

Good question, even without the "Redis" part. Is this check intended to verify that servers know about each other for the purpose of their functionality, or only for the purpose of the health check?

> we're already using Redis to communicate between doc workers

We use Redis to coordinate assignments of docs to workers, and for some other things, like sessions and notifications, but for most traffic between home servers and doc workers, I think we rely on them being able to make HTTP requests to one another.

I am asking these questions to understand the purposes and make sure we are on the same page, but not saying this should be part of this healthcheck -- I can imagine that when setting up, there is value to know that, say, 3 doc-workers are running and healthy, even if the networking part of communicating to them isn't yet working. Is that the goal?

One more question: if I restart some servers, do we want this check to tell me how many got successfully restarted? Just knowing how many are healthy doesn't tell me if I have a mix of old and new ones.

jordigh (Contributor Author), Jul 29, 2025:

The ultimate goal here is to be able to restart all the instances. That will be the next step. The restart is a button on the admin panel, but currently it only restarts whatever server gets hit with the request to restart.

After restarting all the instances, how does the web browser know when it's time to reload the page and get the newly restarted Grist page?

I decided it should be when all of the servers were restarted and ready to serve requests. That's how I arrived at this design. If we reloaded the page without all servers being restarted, we might be in some ambiguous state where some servers aren't ready yet.

> I can imagine that when setting up, there is value to know that, say, 3 doc-workers are running and healthy, even if the networking part of communicating to them isn't yet working. Is that the goal?

Not explicitly. This is using the ready status of an instance which is a little different from healthy. The healthy state just means that the status endpoint is ready, which happens very early during Grist initialisation. This new endpoint instead checks that all instances that are registered on Redis have hit the ready status, which is at the end of the initialisation of an instance, one of the last things that MergedServer does. The ready status should mean that networking is also ready. Of course, if you have some networking problem between your servers where they can't even reach each other or Redis, this check won't catch that.

As to the choice of using Redis instead of internal http requests between the Grist instances, it just seemed easier to me to use a shared Redis channel since Redis seemed inevitable for discovery to begin with.
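For concreteness, here is a minimal sketch (not part of the PR) of how a client could wait on the new check during a full restart. The helper name, polling interval, and deadline are invented; the only piece taken from the change is the /status?allInstancesReady=1 request, and the sketch assumes a failing check yields a non-OK HTTP status, as ConfigAPI.healthcheck above assumes.

import fetch from 'node-fetch';

// Poll the home server until every registered instance reports ready, or
// until our own deadline passes. Returns true once the check succeeds.
async function waitForAllInstancesReady(homeUrl: string, deadlineMs = 60_000): Promise<boolean> {
  const start = Date.now();
  while (Date.now() - start < deadlineMs) {
    try {
      const resp = await fetch(`${homeUrl}/status?allInstancesReady=1`);
      if (resp.ok) { return true; }
    } catch (e) {
      // The server we are polling may itself be restarting; keep trying.
    }
    await new Promise(resolve => setTimeout(resolve, 1000));
  }
  return false;
}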

Member:

If restart is the main goal, then I think my last question from previous comment might be relevant?

jordigh (Contributor Author), Jul 30, 2025:

Right, so it seems to me like the problem of an instance spontaneously dying already requires infrastructure not directly shipped with Grist Core. Maybe we can offer advice on how to handle this problem or supply this infrastructure outside of this PR.

What I can add here is an extra endpoint to instruct a server to clean up the registry of instances on Redis.

Member:

I think you are right, and this comment comes closest to explaining why I feel this whole approach isn't the right one for checking health of a multi-server installation. The issue is that not one part of this knows anything about which servers are expected to be a part of the installation. That knowledge is presumably in some external infrastructure, but then only with the help of that external infrastructure can you know the health of the installation.

E.g. let's say you start a cluster with 3 home servers and 10 doc workers, and you forgot to configure Redis for doc-workers. Then home servers register with Redis, doc-workers don't, and the healthcheck done this way will report that everything is healthy, when it's not even close. Or let's say everything is up and connected to Redis, and you updated some config and told everything to restart, but half your servers didn't react to your restart request, and are still running with the old config (e.g. wrong activation key, or wrong version, etc). Still, this healthcheck will report healthy. If you messed up and got two home servers running that point to different Redis URLs, whichever one the LB happens to ask will each independently report it is healthy, though the setup is broken.

All this approach does is check whether servers that successfully connected to Redis and registered themselves are still connected. Even for the narrow situation when everything is up and clean and registered in Redis, and there is nothing stale, does this help with restarts? As soon as you tell all servers to restart, they'll unregister themselves before exiting, and the healthcheck will return "all clear" even if they don't start up again, since it only checks servers that have successfully started and registered themselves.

I'm just making up scenarios, but fundamentally, it seems like anyone running an actual multi-server installation must have some external infrastructure to know what the servers are intended to be running. If we try to build tools to manage a cluster (even e.g. turn on "enterprise") without at all being aware of this infrastructure, or knowing which servers the admin intends to be affected, I think we might make the job of the administrator harder rather than easier.

Member:

> If we try to build tools to manage a cluster (even e.g. turn on "enterprise") without at all being aware of this infrastructure, or knowing which servers the admin intends to be affected

Tracing the problem back, "turning on enterprise" converted to a "reboot all instances" problem because of when configuration is loaded in the code. One could imagine changing that. But for an admin panel, if we expect in the future to offer ways to change configuration other than the enterprise toggle, the ability to reload the node process everywhere does feel like a good investment, so we don't have to be policing every PR that reads configuration. And being able to display some state about the instances also seems obviously useful.

But it is true that knowing the set of live instances in order to coordinate with them is awkward. Registering/deregistering is brittle. Heartbeats on a channel that e.g. home servers listen to and maintain some state for are an alternative, but would also have some failure modes.

For a configuration change, you could imagine the following process:

  • All servers broadcast an {id: ..., version: ...} heartbeat from time to time on a heartbeat channel.
  • Home servers listen to that channel, store last heartbeats per server with timestamp and expiry after N minutes, in a heartbeats object.
  • When a home server wishes to initiate a config change, it broadcasts a {action: "restart", version: ...} message on a chatter channel everyone listens to. Version code is invented, to represent the config change.
  • Servers honor this action request once they see it, and change the version in their heartbeats to match once the config change is incorporated.
  • Home servers accumulate heartbeats as normal.

Rather than thinking about health, you can think about whether all servers issuing heartbeats are known to be using the latest config.

Needs fleshing out and has problems, just a different direction I was thinking about. But a reasonable alternative could also be to just back off from supporting an enterprise toggle in multi-server installations (but how do you know they are multi-server? use existence of redis as a hint?) and just give instructions.
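A rough sketch of that heartbeat idea, reusing the PubSubManager interface added in this PR; the channel name, message shape, stale-heartbeat window, and version scheme are all invented here, and the restart-action handling described above is left out.

import {createPubSubManager} from 'app/server/lib/PubSubManager';

interface Heartbeat { id: string; version: string; }

const pubSub = createPubSubManager(process.env.REDIS_URL);
const myId = process.env.GRIST_INSTANCE_ID || 'server-1';
let configVersion = 'v1';   // would be bumped once a config change is applied

// Every server broadcasts a heartbeat from time to time.
setInterval(() => {
  const beat: Heartbeat = {id: myId, version: configVersion};
  pubSub.publish('heartbeats', JSON.stringify(beat)).catch(() => {});
}, 15_000);

// Home servers keep the latest heartbeat per server, with a timestamp.
const lastSeen = new Map<string, {version: string, at: number}>();
void pubSub.subscribe('heartbeats', (msg) => {
  const beat: Heartbeat = JSON.parse(msg);
  lastSeen.set(beat.id, {version: beat.version, at: Date.now()});
});

// After broadcasting a restart request, a home server would poll this until
// every recently-seen server reports the expected config version.
function allOnVersion(expected: string, staleMs = 60_000): boolean {
  const now = Date.now();
  return [...lastSeen.values()]
    .filter(info => now - info.at < staleMs)
    .every(info => info.version === expected);
}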

Member:

Yes, it makes a lot more sense if we step away from "is cluster healthy" question, and instead ask something like: "are all servers that are connected to Redis on the expected version of config?"

Also, by exposing more information than a boolean (e.g. "list of all servers connected to Redis"), we'd make it possible for an admin to judge health by comparing that list to what they know about how many servers they expect to be up.

Am I right that this is the first foray into adding tools within Grist to make it easier for self-hosters to run multi-server installations? It would be good if it was a start that made further improvements easier. We know there are a number of difficult things about multi-server, including getting servers to be able to talk to each other (configuring APP_DOC_INTERNAL_URL and the like) and unregistering crashed doc-workers. What seems useful is to have all servers (doc-worker and home-servers) report their info. Maybe instead of broadcasting a heartbeat on Redis pubsub, they can update periodically their info in Redis: including start time, version of Grist, version of config (whatever that means), internal URL, status bits for things a server knows how to check for in its usual /status endpoint (like DB connectivity), and the timestamp of this update. This would be enough information to answer a lot of questions, particularly in combination with other info (like knowledge of expected servers; results of pings to internal urls; contents of workers-available key holding doc-worker registrations, etc)

For the restart task, a home server requesting a restart would only have to poll this info for up to a timeout, until it sees that everyone is reporting the expected version of config.

Setting data in Redis has the advantage over broadcasting that a server (or even a separate tool) can do a check even if it hasn't been subscribed to pubsub since the right point in time. E.g. if a home server requesting a restart is itself restarting, or if a separate page load of the Admin Panel is asking a different home server whether the config change has taken effect.
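A similarly rough sketch of that variant, with each server periodically refreshing a per-server record in Redis that expires if the server stops updating it. The key name, fields, and TTL are invented; only the general shape (periodic writes of start time, versions, internal URL, and a timestamp) follows the suggestion above.

import IORedis from 'ioredis';

const redis = new IORedis(process.env.REDIS_URL || 'redis://localhost:6379');
const instanceId = process.env.GRIST_INSTANCE_ID || 'server-1';

// Called periodically (e.g. from a setInterval) by every server.
async function reportServerInfo(configVersion: string): Promise<void> {
  const key = `grist-server-info:${instanceId}`;
  await redis.hset(
    key,
    'startedAt', String(Date.now() - Math.round(process.uptime() * 1000)),
    'gristVersion', 'placeholder-version',
    'configVersion', configVersion,
    'internalUrl', process.env.APP_DOC_INTERNAL_URL || '',
    'updatedAt', String(Date.now()),
  );
  // Let the record disappear if this server stops refreshing it.
  await redis.expire(key, 120);
}

// A home server, or a standalone tool, can then list the grist-server-info:*
// keys and compare configVersion across servers after requesting a restart.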

Member:

> Maybe instead of broadcasting a heartbeat on Redis pubsub, they can update periodically their info in Redis

That makes sense!

> Am I right that this is the first foray into adding tools within Grist to make it easier for self-hosters to run multi-server installations? It would be good if it was a start that made further improvements easier.

Exactly, that was my hope. Until now, multi-server installations have been ignored.

import log from 'app/server/lib/log';
import {createPubSubManager, IPubSubManager} from 'app/server/lib/PubSubManager';
import * as shutdown from 'app/server/lib/shutdown';

import {v4 as uuidv4} from 'uuid';

// Not to be confused with health checks from the frontend, these
// request/response pairs are internal checks between Grist instances
// in multi-server environments.
interface ServerHealthcheckRequest {
id: string;
instanceId: string;
checkReady: boolean;
}
interface ServerHealthcheckResponse {
instanceId: string;
requestId: string;
healthy: boolean;
}

// For keeping track of pending health checks for all other servers
// for each request that was broadcast to all of them.
interface PendingServerHealthCheck {
expectedCount: number;
responses: Record<string, boolean>;
resolve: (res: boolean) => void;
reject: (err: Error) => void;
timeout: NodeJS.Timeout;
}

/** This class uses pubsub via Redis, if available, to register this
* Grist instance and check that all other instances are healthy.
*
* In single-server instances, it also works without Redis, leveraging
* the dummy defaults of `PubSubManager`.
*/
export class HealthChecker {
private _pendingServerHealthChecks: Map<string, PendingServerHealthCheck>;
private _serverInstanceID: string;
private _pubSubManager: IPubSubManager;

constructor(
private _server: GristServer
) {
this._pubSubManager = createPubSubManager(process.env.REDIS_URL);
this._pendingServerHealthChecks = new Map<string, PendingServerHealthCheck>();
this._serverInstanceID = process.env.GRIST_INSTANCE_ID || `testInstanceId_${this._server.getHost()}`;
this._pubSubManager.getClient()?.sadd('grist-instances', this._serverInstanceID).catch((err) => {
log.error('Failed to contact redis', err);
});
this._subscribeToChannels();

// Make sure we clean up our Redis mess, if any, even if we exit
// by signal.
shutdown.addCleanupHandler(null, () => this.close());
}


/** This returns a promise that resolves to `true` if all other
* registered instances respond as healthy within the given
* timeout, and to `false` otherwise.
*
* @param {number} timeout - number of milliseconds to wait for
* responses from all servers before timeout
*
* @param {boolean} checkReady - whether to insist on `ready` status
* or just a simple health check
*/
public async allServersOkay(timeout: number, checkReady: boolean): Promise<boolean> {
const requestId = uuidv4();
const client = this._pubSubManager.getClient();

// If there is no Redis, then our current instance is the only instance
const allInstances = await client?.smembers('grist-instances') || [this._serverInstanceID];

const allInstancesPromise: Promise<boolean> = new Promise((resolve: (res: boolean) => void, reject) => {
const allInstancesTimeout = setTimeout(() => {
log.warn('allServersOkay: timeout waiting for responses');
reject(new Error('Timeout waiting for health responses'));
this._pendingServerHealthChecks.delete(requestId);
}, timeout);

this._pendingServerHealthChecks.set(requestId, {
responses: {},
expectedCount: allInstances.length,
resolve,
reject,
timeout: allInstancesTimeout,
});
}).catch(() => false);
const request: ServerHealthcheckRequest = {
id: requestId,
instanceId: this._serverInstanceID,
checkReady,
};
await this._pubSubManager.publish('healthcheck:requests', JSON.stringify(request));
return allInstancesPromise;
}

public async close() {
await this._pubSubManager.getClient()?.srem('grist-instances', [this._serverInstanceID]);
await this._pubSubManager.close();
}

private _subscribeToChannels() {
this._pubSubManager.subscribe('healthcheck:requests', async (message) => {

Check failure on line 107 in app/server/lib/HealthChecker.ts (GitHub Actions / build_and_test, both lint jobs): Promises must be awaited, end with a call to .catch, end with a call to .then with a rejection handler or be explicitly marked as ignored with the `void` operator

const request: ServerHealthcheckRequest = JSON.parse(message);
const response: ServerHealthcheckResponse = {
instanceId: this._serverInstanceID || '',
requestId: request.id,
healthy: !request.checkReady || this._server.ready,
};
log.debug('allServersOkay request', response);
await this._pubSubManager.publish(`healthcheck:responses-${request.instanceId}`, JSON.stringify(response));
});

this._pubSubManager.subscribe(`healthcheck:responses-${this._serverInstanceID}`, (message) => {

Check failure on line 118 in app/server/lib/HealthChecker.ts (GitHub Actions / build_and_test, both lint jobs): Promises must be awaited, end with a call to .catch, end with a call to .then with a rejection handler or be explicitly marked as ignored with the `void` operator

const response: ServerHealthcheckResponse = JSON.parse(message);
const pending = this._pendingServerHealthChecks.get(response.requestId);
if (!pending) {
// This instance didn't broadcast a health check request with
// this requestId, so nothing to do.
return;
}

pending.responses[response.instanceId] = response.healthy;
log.debug(
`allServersOkay cleared pending response on ${this._serverInstanceID} for ${response.instanceId}`
);

if (Object.keys(pending.responses).length === pending.expectedCount) {
// All servers have replied. Make it known and clean up.
clearTimeout(pending.timeout);
pending.resolve(Object.values(pending.responses).every(e => e));
this._pendingServerHealthChecks.delete(response.requestId);
}
});
}
}
10 changes: 10 additions & 0 deletions app/server/lib/PubSubManager.ts
@@ -31,6 +31,7 @@ export interface IPubSubManager {
subscribe(channel: string, callback: Callback): UnsubscribeCallbackPromise;
publish(channel: string, message: string): Promise<void>;
publishBatch(batch: Array<{channel: string, message: string}>): Promise<void>;
getClient(): IORedis|undefined;
}

export type Callback = (message: string) => void;
@@ -106,6 +107,8 @@ abstract class PubSubManagerBase implements IPubSubManager {
*/
public abstract publishBatch(batch: Array<{channel: string, message: string}>): Promise<void>;

public abstract getClient(): IORedis|undefined;

protected abstract _redisSubscribe(channel: string): Promise<void>;
protected abstract _redisUnsubscribe(channel: string): Promise<void>;

@@ -133,6 +136,7 @@ class PubSubManagerNoRedis extends PubSubManagerBase {
public async publishBatch(batch: Array<{channel: string, message: string}>) {
batch.forEach(({channel, message}) => this._deliverMessage(channel, message));
}
public getClient(): IORedis|undefined { return; }
protected async _redisSubscribe(channel: string): Promise<void> {}
protected async _redisUnsubscribe(channel: string): Promise<void> {}
}
@@ -182,6 +186,12 @@ class PubSubManagerRedis extends PubSubManagerBase {
await pipeline.exec();
}

public getClient(): IORedis|undefined {
// The redisSub client is already tied up listening to subscribed channels,
// but the redisPub client is free for callers to use for other commands.
return this._redisPub;
}

protected async _redisSubscribe(channel: string): Promise<void> {
await this._redisSub.subscribe(this._prefixChannel(channel));
}
2 changes: 1 addition & 1 deletion app/server/lib/attachEarlyEndpoints.ts
@@ -113,7 +113,7 @@ export function attachEarlyEndpoints(options: AttachOptions) {
});
}
// We're going down, so we're no longer ready to serve requests.
gristServer.setReady(false);
gristServer.ready = false;
return res.status(200).send({ msg: "ok" });
})
);
18 changes: 5 additions & 13 deletions test/gen-server/lib/HealthCheck.ts
@@ -1,7 +1,7 @@
import { assert } from 'chai';
import fetch from 'node-fetch';
import { TestServer } from 'test/gen-server/apiUtils';
import { TcpForwarder } from 'test/server/tcpForwarder';
import { RedisForwarder } from 'test/server/tcpForwarder';
import * as testUtils from 'test/server/testUtils';
import { waitForIt } from 'test/server/wait';

@@ -12,22 +12,14 @@ describe('HealthCheck', function() {
describe(serverType, function() {
let server: TestServer;
let oldEnv: testUtils.EnvironmentSnapshot;
let redisForwarder: TcpForwarder;
let redisForwarder: RedisForwarder;

before(async function() {
oldEnv = new testUtils.EnvironmentSnapshot();

// We set up Redis via a TcpForwarder, so that we can simulate disconnects.
if (!process.env.TEST_REDIS_URL) {
throw new Error("TEST_REDIS_URL is expected");
}
const redisUrl = new URL(process.env.TEST_REDIS_URL);
const redisPort = parseInt(redisUrl.port, 10) || 6379;
redisForwarder = new TcpForwarder(redisPort, redisUrl.host);
const forwarderPort = await redisForwarder.pickForwarderPort();
await redisForwarder.connect();

process.env.REDIS_URL = `redis://localhost:${forwarderPort}`;
// We set up Redis via a forwarder, so that we can simulate disconnects.
redisForwarder = await RedisForwarder.create();
process.env.REDIS_URL = `redis://localhost:${redisForwarder.port}`;
server = new TestServer(this);
await server.start([serverType]);
});
2 changes: 1 addition & 1 deletion test/server/lib/Authorizer.ts
@@ -43,7 +43,7 @@ async function activateServer(home: FlexServer, docManager: DocManager) {
home.addApiErrorHandlers();
home.finalizeEndpoints();
await home.finalizePlugins(null);
home.setReady(true);
home.ready = true;
serverUrl = home.getOwnUrl();
}
