7123 lines
246 KiB
JavaScript
7123 lines
246 KiB
JavaScript
|
|
// We are modularizing this manually because the current modularize setting in Emscripten has some issues:
|
|
// https://github.com/kripken/emscripten/issues/5820
|
|
// In addition, When you use emcc's modularization, it still expects to export a global object called `Module`,
|
|
// which is able to be used/called before the WASM is loaded.
|
|
// The modularization below exports a promise that loads and resolves to the actual sql.js module.
|
|
// That way, this module can't be used before the WASM is finished loading.
|
|
|
|
// We are going to define a function that a user will call to start loading initializing our Sql.js library
|
|
// However, that function might be called multiple times, and on subsequent calls, we don't actually want it to instantiate a new instance of the Module
|
|
// Instead, we want to return the previously loaded module
|
|
|
|
// TODO: Make this not declare a global if used in the browser
|
|
// Memoized promise for the built sql.js module; populated on the first
// initSqlJs() call and returned unchanged on every subsequent call.
var initSqlJsPromise;
|
|
|
|
// Entry point users call to load sql.js. NOTE: this function's body is NOT
// closed here — the emcc-generated code below is textually included inside
// the Promise callback, and the function/promise are closed in shell-post.js
// (outside this view).
var initSqlJs = function (moduleConfig) {

    // Memoization guard: repeated calls must not instantiate a second WASM
    // module; return the promise created by the first call.
    if (initSqlJsPromise){
        return initSqlJsPromise;
    }
    // If we're here, we've never called this function before
    initSqlJsPromise = new Promise(function (resolveModule, reject) {

        // We are modularizing this manually because the current modularize
        // setting in Emscripten has some issues:
        // https://github.com/kripken/emscripten/issues/5820
        //
        // The way to affect the loading of emcc compiled modules is to create
        // a variable called `Module` and add properties to it (`preRun`,
        // `postRun`, etc). We use that to get notified when the WASM has
        // finished loading; only then do we resolve our promise.

        // Use the caller-supplied moduleConfig as Module if given, otherwise
        // start from an empty object.
        var Module = typeof moduleConfig !== 'undefined' ? moduleConfig : {};

        // EMCC only allows a single onAbort function (not an array of
        // functions), so if the user defined their own onAbort we remember it
        // and chain to it after rejecting our promise.
        var originalOnAbortFunction = Module['onAbort'];
        Module['onAbort'] = function (errorThatCausedAbort) {
            reject(new Error(errorThatCausedAbort));
            if (originalOnAbortFunction){
                originalOnAbortFunction(errorThatCausedAbort);
            }
        };

        Module['postRun'] = Module['postRun'] || [];
        Module['postRun'].push(function () {
            // When Emscripten calls postRun, this promise resolves with the
            // fully built Module.
            resolveModule(Module);
        });

        // The emcc-generated code below contains (lowercase `module`):
        //   if (typeof module !== 'undefined') { module['exports'] = Module; }
        // which would overwrite our own modularization export in
        // shell-post.js. The only emcc flags that suppress it (MODULARIZE=1 /
        // MODULARIZE_INSTANCE=1) carry unwanted baggage, so of the available
        // options (undefine `module`, save/restore `module.exports`, or strip
        // the lines during the build) we undefine `module` — the most
        // straightforward choice, and it also neutralizes any future emcc
        // output that touches `module`, since we handle modularization
        // ourselves.
        module = undefined;
|
|
|
|
// The emcc-generated code and shell-post.js code goes below,
|
|
// meaning that all of it runs inside of this promise. If anything throws an exception, our promise will abort
|
|
// include: shell.js
|
|
// include: minimum_runtime_check.js
|
|
(function() {
    // Emcc-generated runtime-environment check. The numeric thresholds below
    // are substituted at build time; 2147483647 (INT32_MAX) is the sentinel
    // meaning "this target was not compiled for".

    // Convert a human-readable version to a packed, lexically-comparable
    // form, e.g. "30.0.0" -> "300000". Returns a STRING of digits; the
    // comparisons below coerce it to a number when compared with literals.
    function humanReadableVersionToPacked(str) {
        str = str.split('-')[0]; // Remove any trailing part from e.g. "12.53.3-alpha"
        var vers = str.split('.').slice(0, 3);
        // Pad missing minor/patch components so every version packs to
        // exactly three two-digit fields.
        while(vers.length < 3) vers.push('00');
        vers = vers.map((n, i, arr) => n.padStart(2, '0'));
        return vers.join('');
    }

    // Inverse of the above for error messages: 300000 -> "30.0.0"
    var packedVersionToHumanReadable = n => [n / 10000 | 0, (n / 100 | 0) % 100, n % 100].join('.');

    var TARGET_NOT_SUPPORTED = 2147483647;

    // Note: We use a typeof check here instead of optional chaining using
    // globalThis because older browsers might not have globalThis defined.
    var currentNodeVersion = typeof process !== 'undefined' && process.versions?.node ? humanReadableVersionToPacked(process.versions.node) : TARGET_NOT_SUPPORTED;
    // NOTE(review): with the substituted constants, ANY detected Node version
    // packs to a value below INT32_MAX and triggers this error — this build
    // appears to target web environments only; confirm against build flags.
    if (currentNodeVersion < TARGET_NOT_SUPPORTED) {
        throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)');
    }
    // Minimum-version check (threshold substituted at build time; here it is
    // the sentinel, so this branch is unreachable after the check above).
    if (currentNodeVersion < 2147483647) {
        throw new Error(`This emscripten-generated code requires node v${ packedVersionToHumanReadable(2147483647) } (detected v${packedVersionToHumanReadable(currentNodeVersion)})`);
    }

    // Browser checks below need a user agent; bail out silently when there is
    // none (e.g. a worker without navigator, or a shell environment).
    var userAgent = typeof navigator !== 'undefined' && navigator.userAgent;
    if (!userAgent) {
        return;
    }

    // Safari reports its version via "Version/x.y.z" (and also sends
    // "Safari/" without "Chrome/", unlike Chromium-based browsers).
    var currentSafariVersion = userAgent.includes("Safari/") && !userAgent.includes("Chrome/") && userAgent.match(/Version\/(\d+\.?\d*\.?\d*)/) ? humanReadableVersionToPacked(userAgent.match(/Version\/(\d+\.?\d*\.?\d*)/)[1]) : TARGET_NOT_SUPPORTED;
    if (currentSafariVersion < 150000) {
        throw new Error(`This emscripten-generated code requires Safari v${ packedVersionToHumanReadable(150000) } (detected v${currentSafariVersion})`);
    }

    // Firefox and Chrome use a single float-comparable major[.minor] number.
    var currentFirefoxVersion = userAgent.match(/Firefox\/(\d+(?:\.\d+)?)/) ? parseFloat(userAgent.match(/Firefox\/(\d+(?:\.\d+)?)/)[1]) : TARGET_NOT_SUPPORTED;
    if (currentFirefoxVersion < 79) {
        throw new Error(`This emscripten-generated code requires Firefox v79 (detected v${currentFirefoxVersion})`);
    }

    var currentChromeVersion = userAgent.match(/Chrome\/(\d+(?:\.\d+)?)/) ? parseFloat(userAgent.match(/Chrome\/(\d+(?:\.\d+)?)/)[1]) : TARGET_NOT_SUPPORTED;
    if (currentChromeVersion < 85) {
        throw new Error(`This emscripten-generated code requires Chrome v85 (detected v${currentChromeVersion})`);
    }
})();
|
|
|
|
// end include: minimum_runtime_check.js
|
|
// The Module object: Our interface to the outside world. We import
|
|
// and export values on it. There are various ways Module can be used:
|
|
// 1. Not defined. We create it here
|
|
// 2. A function parameter, function(moduleArg) => Promise<Module>
|
|
// 3. pre-run appended it, var Module = {}; ..generated code..
|
|
// 4. External script tag defines var Module.
|
|
// We need to check if Module already exists (e.g. case 3 above).
|
|
// Substitution will be replaced with actual code on later stage of the build,
|
|
// this way Closure Compiler will not mangle it (e.g. case 4. above).
|
|
// Note that if you want to run closure, and also to use Module
|
|
// after the generated code, you will need to define var Module = {};
|
|
// before the code. Then that object will be used in the code, and you
|
|
// can continue to use Module afterwards as well.
|
|
// The Module object: our interface to the outside world. Re-use a
// pre-existing Module (e.g. one appended by pre-run code or an external
// script tag) when present, otherwise start fresh. The self-referential
// `typeof Module` check relies on `var` hoisting within this scope.
var Module = typeof Module != 'undefined' ? Module : {};

// Auto-detect the runtime environment (customizable at compile time via the
// ENVIRONMENT setting in settings.js).
var ENVIRONMENT_IS_WEB = Boolean(globalThis.window);
var ENVIRONMENT_IS_WORKER = Boolean(globalThis.WorkerGlobalScope);
// N.b. an Electron.js renderer is simultaneously a NODE environment and a
// web environment; treat it as web here.
var ENVIRONMENT_IS_NODE = globalThis.process?.versions?.node && globalThis.process?.type != 'renderer';
var ENVIRONMENT_IS_SHELL = !(ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_WORKER);
|
|
|
|
// --pre-jses are emitted after the Module integration code, so that they can
|
|
// refer to Module (if they choose; they can also define Module)
|
|
// include: src/api.js
|
|
/* global
|
|
FS
|
|
HEAP8
|
|
Module
|
|
_malloc
|
|
_free
|
|
getValue
|
|
setValue
|
|
stackAlloc
|
|
stackRestore
|
|
stackSave
|
|
UTF8ToString
|
|
stringToNewUTF8
|
|
removeFunction
|
|
addFunction
|
|
writeArrayToMemory
|
|
*/
|
|
|
|
"use strict";
|
|
|
|
/**
|
|
* @typedef {{Database:Database, Statement:Statement}} SqlJs
|
|
* @property {Database} Database A class that represents an SQLite database
|
|
* @property {Statement} Statement The prepared statement class
|
|
*/
|
|
|
|
/**
|
|
* @typedef {{locateFile:function(string):string}} SqlJsConfig
|
|
* @property {function(string):string} locateFile
|
|
* a function that returns the full path to a resource given its file name
|
|
* @see https://emscripten.org/docs/api_reference/module.html
|
|
*/
|
|
|
|
/**
|
|
* Asynchronously initializes sql.js
|
|
* @function initSqlJs
|
|
 * @param {SqlJsConfig} config module initialization parameters
|
|
* @returns {SqlJs}
|
|
* @example
|
|
* initSqlJs({
|
|
* locateFile: name => '/path/to/assets/' + name
|
|
* }).then(SQL => {
|
|
* const db = new SQL.Database();
|
|
* const result = db.exec("select 'hello world'");
|
|
* console.log(result);
|
|
* })
|
|
*/
|
|
|
|
/**
|
|
* @module SqlJs
|
|
*/
|
|
// Wait for preRun to run, and then finish our initialization
|
|
// Wait for preRun to run, and then finish our initialization. NOTE: this
// function is NOT closed here — the remainder of the API layer (below) is
// defined inside it and it is closed further down the file.
Module["onRuntimeInitialized"] = function onRuntimeInitialized() {
    // Scratch 4-byte slot on the Emscripten stack, reused as an out-pointer
    // for C calls (e.g. sqlite3_open/prepare). Allocated once for the whole
    // lifetime of the module.
    var apiTemp = stackAlloc(4);
    var cwrap = Module["cwrap"];
    // Null pointer
    var NULL = 0;
    // SQLite result codes (https://www.sqlite.org/rescode.html)
    var SQLITE_OK = 0;
    var SQLITE_ROW = 100;
    var SQLITE_DONE = 101;
    // SQLite fundamental datatypes (sqlite3_column_type return values)
    var SQLITE_INTEGER = 1;
    var SQLITE_FLOAT = 2;
    var SQLITE_TEXT = 3;
    var SQLITE_BLOB = 4;
    // Text encodings, used when registering functions.
    var SQLITE_UTF8 = 1;
    // Authorizer action codes, used to identify change types in updateHook.
    var SQLITE_INSERT = 18;
    var SQLITE_UPDATE = 23;
    var SQLITE_DELETE = 9;

    // cwrap'd bindings to the compiled SQLite C API. Signature strings:
    // "number" = C int/pointer/double, "string" = NUL-terminated UTF-8
    // (marshalled by Emscripten), "" = void return.

    // Connection lifecycle and one-shot execution
    var sqlite3_open = cwrap("sqlite3_open", "number", ["string", "number"]);
    var sqlite3_close_v2 = cwrap("sqlite3_close_v2", "number", ["number"]);
    var sqlite3_exec = cwrap("sqlite3_exec", "number", ["number", "string", "number", "number", "number"]);
    var sqlite3_changes = cwrap("sqlite3_changes", "number", ["number"]);

    // Statement preparation. Two wrappers for sqlite3_prepare_v2: one takes a
    // JS string (marshalled for us), the other a raw heap pointer so the
    // pzTail out-parameter can be used to iterate multiple statements.
    var sqlite3_prepare_v2 = cwrap("sqlite3_prepare_v2", "number", ["number", "string", "number", "number", "number"]);
    var sqlite3_sql = cwrap("sqlite3_sql", "string", ["number"]);
    var sqlite3_normalized_sql = cwrap("sqlite3_normalized_sql", "string", ["number"]);
    var sqlite3_prepare_v2_sqlptr = cwrap("sqlite3_prepare_v2", "number", ["number", "number", "number", "number", "number"]);

    // Parameter binding
    var sqlite3_bind_text = cwrap("sqlite3_bind_text", "number", ["number", "number", "number", "number", "number"]);
    var sqlite3_bind_blob = cwrap("sqlite3_bind_blob", "number", ["number", "number", "number", "number", "number"]);
    var sqlite3_bind_double = cwrap("sqlite3_bind_double", "number", ["number", "number", "number"]);
    var sqlite3_bind_int = cwrap("sqlite3_bind_int", "number", ["number", "number", "number"]);

    var sqlite3_bind_parameter_index = cwrap("sqlite3_bind_parameter_index", "number", ["number", "string"]);

    // Stepping and error reporting
    var sqlite3_step = cwrap("sqlite3_step", "number", ["number"]);
    var sqlite3_errmsg = cwrap("sqlite3_errmsg", "string", ["number"]);

    // Result-row introspection
    var sqlite3_column_count = cwrap("sqlite3_column_count", "number", ["number"]);
    var sqlite3_data_count = cwrap("sqlite3_data_count", "number", ["number"]);
    var sqlite3_column_double = cwrap("sqlite3_column_double", "number", ["number", "number"]);
    var sqlite3_column_text = cwrap("sqlite3_column_text", "string", ["number", "number"]);
    var sqlite3_column_blob = cwrap("sqlite3_column_blob", "number", ["number", "number"]);
    var sqlite3_column_bytes = cwrap("sqlite3_column_bytes", "number", ["number", "number"]);
    var sqlite3_column_type = cwrap("sqlite3_column_type", "number", ["number", "number"]);
    var sqlite3_column_name = cwrap("sqlite3_column_name", "string", ["number", "number"]);

    // Statement reset / teardown
    var sqlite3_reset = cwrap("sqlite3_reset", "number", ["number"]);
    var sqlite3_clear_bindings = cwrap("sqlite3_clear_bindings", "number", ["number"]);
    var sqlite3_finalize = cwrap("sqlite3_finalize", "number", ["number"]);

    // User-defined functions: registration, value extraction, result setting
    var sqlite3_create_function_v2 = cwrap("sqlite3_create_function_v2", "number", ["number", "string", "number", "number", "number", "number", "number", "number", "number"]);
    var sqlite3_value_type = cwrap("sqlite3_value_type", "number", ["number"]);
    var sqlite3_value_bytes = cwrap("sqlite3_value_bytes", "number", ["number"]);
    var sqlite3_value_text = cwrap("sqlite3_value_text", "string", ["number"]);
    var sqlite3_value_blob = cwrap("sqlite3_value_blob", "number", ["number"]);
    var sqlite3_value_double = cwrap("sqlite3_value_double", "number", ["number"]);
    var sqlite3_result_double = cwrap("sqlite3_result_double", "", ["number", "number"]);
    var sqlite3_result_null = cwrap("sqlite3_result_null", "", ["number"]);
    var sqlite3_result_text = cwrap("sqlite3_result_text", "", ["number", "string", "number", "number"]);
    var sqlite3_result_blob = cwrap("sqlite3_result_blob", "", ["number", "number", "number", "number"]);
    var sqlite3_result_int = cwrap("sqlite3_result_int", "", ["number", "number"]);
    var sqlite3_result_error = cwrap("sqlite3_result_error", "", ["number", "string", "number"]);

    // https://www.sqlite.org/c3ref/aggregate_context.html
    // void *sqlite3_aggregate_context(sqlite3_context*, int nBytes)
    var sqlite3_aggregate_context = cwrap("sqlite3_aggregate_context", "number", ["number", "number"]);

    // Extension entry point compiled into this build (extra SQL functions).
    var registerExtensionFunctions = cwrap("RegisterExtensionFunctions", "number", ["number"]);

    var sqlite3_update_hook = cwrap("sqlite3_update_hook", "number", ["number", "number", "number"]);
|
|
|
|
/**
|
|
* @classdesc
|
|
* Represents a prepared statement.
|
|
* Prepared statements allow you to have a template sql string,
|
|
* that you can execute multiple times with different parameters.
|
|
*
|
|
* You can't instantiate this class directly, you have to use a
|
|
* {@link Database} object in order to create a statement.
|
|
*
|
|
* **Warnings**
|
|
* 1. When you close a database (using db.close()), all
|
|
* its statements are closed too and become unusable.
|
|
* 1. After calling db.prepare() you must manually free the assigned memory
|
|
* by calling Statement.free(). Failure to do this will cause subsequent
|
|
* 'DROP TABLE ...' statements to fail with 'Uncaught Error: database table
|
|
* is locked'.
|
|
*
|
|
* Statements can't be created by the API user directly, only by
|
|
* Database::prepare
|
|
*
|
|
* @see Database.html#prepare-dynamic
|
|
* @see https://en.wikipedia.org/wiki/Prepared_statement
|
|
*
|
|
* @constructs Statement
|
|
* @memberof module:SqlJs
|
|
* @param {number} stmt1 The SQLite statement reference
|
|
* @param {Database} db The database from which this statement was created
|
|
*/
|
|
// Constructor for prepared statements; see the JSDoc above. Instances are
// created only by Database.prototype.prepare / iterateStatements.
function Statement(stmt1, db) {
    // Raw sqlite3_stmt pointer and the owning Database.
    this.db = db;
    this.stmt = stmt1;
    // SQLite's leftmost bind/column index is 1; `pos` tracks the next slot
    // used when bind/get helpers are called without an explicit position.
    this.pos = 1;
    // Heap pointers allocated while binding; freed when the statement is
    // reset or destroyed.
    this.allocatedmem = [];
}
|
|
|
|
/** @typedef {string|number|null|Uint8Array} Database.SqlValue */
|
|
/** @typedef {
|
|
Array<Database.SqlValue>|Object<string, Database.SqlValue>|null
|
|
} Statement.BindParams
|
|
*/
|
|
|
|
/** Bind values to the parameters, after having reset the statement.
|
|
* If values is null, do nothing and return true.
|
|
*
|
|
* SQL statements can have parameters,
|
|
* named *'?', '?NNN', ':VVV', '@VVV', '$VVV'*,
|
|
* where NNN is a number and VVV a string.
|
|
* This function binds these parameters to the given values.
|
|
*
|
|
* *Warning*: ':', '@', and '$' are included in the parameters names
|
|
*
|
|
* ## Value types
|
|
* Javascript type | SQLite type
|
|
* -----------------| -----------
|
|
* number | REAL, INTEGER
|
|
* boolean | INTEGER
|
|
* string | TEXT
|
|
* Array, Uint8Array| BLOB
|
|
* null | NULL
|
|
*
|
|
* @example <caption>Bind values to named parameters</caption>
|
|
* var stmt = db.prepare(
|
|
* "UPDATE test SET a=@newval WHERE id BETWEEN $mini AND $maxi"
|
|
* );
|
|
* stmt.bind({$mini:10, $maxi:20, '@newval':5});
|
|
*
|
|
* @example <caption>Bind values to anonymous parameters</caption>
|
|
* // Create a statement that contains parameters like '?', '?NNN'
|
|
* var stmt = db.prepare("UPDATE test SET a=? WHERE id BETWEEN ? AND ?");
|
|
* // Call Statement.bind with an array as parameter
|
|
* stmt.bind([5, 10, 20]);
|
|
*
|
|
* @see http://www.sqlite.org/datatype3.html
|
|
* @see http://www.sqlite.org/lang_expr.html#varparam
|
|
|
|
* @param {Statement.BindParams} values The values to bind
|
|
* @return {boolean} true if it worked
|
|
* @throws {String} SQLite Error
|
|
*/
|
|
// Reset the statement, then dispatch on the shape of `values`: arrays bind
// by position, plain objects bind by parameter name, null/undefined is a
// no-op that still resets. Returns true on success; throws on closed stmt.
Statement.prototype["bind"] = function bind(values) {
    if (!this.stmt) throw "Statement closed";
    this["reset"]();
    if (Array.isArray(values)) return this.bindFromArray(values);
    if (values != null && typeof values === "object") {
        return this.bindFromObject(values);
    }
    return true;
};
|
|
|
|
/** Execute the statement, fetching the next row of result,
|
|
that can be retrieved with {@link Statement.get}.
|
|
|
|
@return {boolean} true if a row of result available
|
|
@throws {String} SQLite Error
|
|
*/
|
|
// Advance the statement one row. Returns true while rows remain
// (SQLITE_ROW), false when finished (SQLITE_DONE); any other result code is
// turned into an exception via the database's error handler. Also rewinds
// the auto-read cursor so get() starts from the first column.
Statement.prototype["step"] = function step() {
    if (!this.stmt) throw "Statement closed";
    this.pos = 1;
    var ret = sqlite3_step(this.stmt);
    if (ret === SQLITE_ROW) return true;
    if (ret === SQLITE_DONE) return false;
    throw this.db.handleError(ret);
};
|
|
|
|
/*
|
|
Internal methods to retrieve data from the results of a statement
|
|
that has been executed
|
|
*/
|
|
// Internal: read column `pos` of the current row as a double. When `pos` is
// omitted, consume the next auto-read slot.
Statement.prototype.getNumber = function getNumber(pos) {
    if (pos == null) pos = this.pos++;
    return sqlite3_column_double(this.stmt, pos);
};
|
|
|
|
// Internal: read column `pos` as a BigInt by round-tripping through SQLite's
// text representation (avoids double-precision loss for 64-bit integers).
Statement.prototype.getBigInt = function getBigInt(pos) {
    if (pos == null) pos = this.pos++;
    var text = sqlite3_column_text(this.stmt, pos);
    if (typeof BigInt !== "function") {
        throw new Error("BigInt is not supported");
    }
    /* global BigInt */
    return BigInt(text);
};
|
|
|
|
// Internal: read column `pos` of the current row as a UTF-8 string.
Statement.prototype.getString = function getString(pos) {
    if (pos == null) pos = this.pos++;
    return sqlite3_column_text(this.stmt, pos);
};
|
|
|
|
// Internal: read column `pos` as a blob, copying the bytes out of the WASM
// heap into a fresh Uint8Array (the heap pointer is only valid until the
// next step/reset, so a copy is required).
Statement.prototype.getBlob = function getBlob(pos) {
    if (pos == null) pos = this.pos++;
    var size = sqlite3_column_bytes(this.stmt, pos);
    var ptr = sqlite3_column_blob(this.stmt, pos);
    var result = new Uint8Array(size);
    var i = 0;
    while (i < size) {
        result[i] = HEAP8[ptr + i];
        i += 1;
    }
    return result;
};
|
|
|
|
/** Get one row of results of a statement.
|
|
If the first parameter is not provided, step must have been called before.
|
|
@param {Statement.BindParams} [params] If set, the values will be bound
|
|
to the statement before it is executed
|
|
@return {Array<Database.SqlValue>} One row of result
|
|
|
|
@example
|
|
<caption>Print all the rows of the table test to the console</caption>
|
|
var stmt = db.prepare("SELECT * FROM test");
|
|
while (stmt.step()) console.log(stmt.get());
|
|
|
|
<caption>Enable BigInt support</caption>
|
|
var stmt = db.prepare("SELECT * FROM test");
|
|
while (stmt.step()) console.log(stmt.get(null, {useBigInt: true}));
|
|
*/
|
|
// Fetch the current row as an array of JS values (see JSDoc above). If
// `params` is given, bind and step first. `config.useBigInt` makes INTEGER
// columns come back as BigInt instead of Number. Unknown/NULL column types
// yield null.
Statement.prototype["get"] = function get(params, config) {
    config = config || {};
    if (params != null && this["bind"](params)) {
        this["step"]();
    }
    var row = [];
    var nFields = sqlite3_data_count(this.stmt);
    for (var field = 0; field < nFields; field += 1) {
        var colType = sqlite3_column_type(this.stmt, field);
        if (colType === SQLITE_INTEGER) {
            row.push(config["useBigInt"]
                ? this.getBigInt(field)
                : this.getNumber(field));
        } else if (colType === SQLITE_FLOAT) {
            row.push(this.getNumber(field));
        } else if (colType === SQLITE_TEXT) {
            row.push(this.getString(field));
        } else if (colType === SQLITE_BLOB) {
            row.push(this.getBlob(field));
        } else {
            row.push(null);
        }
    }
    return row;
};
|
|
|
|
/** Get the list of column names of a row of result of a statement.
|
|
@return {Array<string>} The names of the columns
|
|
@example
|
|
var stmt = db.prepare(
|
|
"SELECT 5 AS nbr, x'616200' AS data, NULL AS null_value;"
|
|
);
|
|
stmt.step(); // Execute the statement
|
|
console.log(stmt.getColumnNames());
|
|
// Will print ['nbr','data','null_value']
|
|
*/
|
|
// Return the column names of the statement's result set, in column order.
Statement.prototype["getColumnNames"] = function getColumnNames() {
    var names = [];
    var count = sqlite3_column_count(this.stmt);
    for (var i = 0; i < count; i += 1) {
        names.push(sqlite3_column_name(this.stmt, i));
    }
    return names;
};
|
|
|
|
/** Get one row of result as a javascript object, associating column names
|
|
with their value in the current row.
|
|
@param {Statement.BindParams} [params] If set, the values will be bound
|
|
to the statement, and it will be executed
|
|
@return {Object<string, Database.SqlValue>} The row of result
|
|
@see {@link Statement.get}
|
|
|
|
@example
|
|
|
|
var stmt = db.prepare(
|
|
"SELECT 5 AS nbr, x'010203' AS data, NULL AS null_value;"
|
|
);
|
|
stmt.step(); // Execute the statement
|
|
console.log(stmt.getAsObject());
|
|
// Will print {nbr:5, data: Uint8Array([1,2,3]), null_value:null}
|
|
*/
|
|
// Fetch the current row as a {columnName: value} object (see JSDoc above).
// If two columns share a name, the later one wins — same as the positional
// pairing of getColumnNames() with get().
Statement.prototype["getAsObject"] = function getAsObject(params, config) {
    var values = this["get"](params, config);
    var names = this["getColumnNames"]();
    var rowObject = {};
    names.forEach(function pair(name, i) {
        rowObject[name] = values[i];
    });
    return rowObject;
};
|
|
|
|
/** Get the SQL string used in preparing this statement.
|
|
@return {string} The SQL string
|
|
*/
|
|
// Return the original SQL text this statement was prepared from.
Statement.prototype["getSQL"] = function getSQL() {
    var sql = sqlite3_sql(this.stmt);
    return sql;
};
|
|
|
|
/** Get the SQLite's normalized version of the SQL string used in
|
|
preparing this statement. The meaning of "normalized" is not
|
|
well-defined: see {@link https://sqlite.org/c3ref/expanded_sql.html
|
|
the SQLite documentation}.
|
|
|
|
@example
|
|
db.run("create table test (x integer);");
|
|
stmt = db.prepare("select * from test where x = 42");
|
|
// returns "SELECT*FROM test WHERE x=?;"
|
|
|
|
@return {string} The normalized SQL string
|
|
*/
|
|
// Return SQLite's normalized form of the statement's SQL (literals replaced
// by '?', canonical whitespace); see the JSDoc above for caveats.
Statement.prototype["getNormalizedSQL"] = function getNormalizedSQL() {
    var normalized = sqlite3_normalized_sql(this.stmt);
    return normalized;
};
|
|
|
|
/** Shorthand for bind + step + reset
|
|
Bind the values, execute the statement, ignoring the rows it returns,
|
|
and resets it
|
|
@param {Statement.BindParams} [values] Value to bind to the statement
|
|
*/
|
|
// Shorthand for bind + step + reset: bind `values` (when given), execute the
// statement ignoring any rows it produces, and reset it for reuse.
Statement.prototype["run"] = function run(values) {
    if (values != null) this["bind"](values);
    this["step"]();
    return this["reset"]();
};
|
|
|
|
// Internal: bind a JS string at parameter `pos`. Copies the string into the
// WASM heap (freed later by freemem()) and hands SQLite the pointer with
// length -1 (NUL-terminated) and no destructor (we own the memory).
Statement.prototype.bindString = function bindString(string, pos) {
    if (pos == null) pos = this.pos++;
    var strptr = stringToNewUTF8(string);
    this.allocatedmem.push(strptr);
    this.db.handleError(
        sqlite3_bind_text(this.stmt, pos, strptr, -1, 0)
    );
    return true;
};
|
|
|
|
// Internal: bind a byte array at parameter `pos`. The bytes are copied onto
// the WASM heap (freed later by freemem()); SQLite is given the pointer,
// the length, and no destructor (we own the memory).
Statement.prototype.bindBlob = function bindBlob(array, pos) {
    if (pos == null) pos = this.pos++;
    var blobptr = _malloc(array.length);
    writeArrayToMemory(array, blobptr);
    this.allocatedmem.push(blobptr);
    this.db.handleError(
        sqlite3_bind_blob(this.stmt, pos, blobptr, array.length, 0)
    );
    return true;
};
|
|
|
|
// Internal: bind a JS number at parameter `pos`. Values that survive the
// `| 0` round-trip are bound as 32-bit ints, everything else as doubles.
Statement.prototype.bindNumber = function bindNumber(num, pos) {
    if (pos == null) pos = this.pos++;
    var isInt32 = num === (num | 0);
    this.db.handleError(
        (isInt32 ? sqlite3_bind_int : sqlite3_bind_double)(this.stmt, pos, num)
    );
    return true;
};
|
|
|
|
// Internal: bind SQL NULL at parameter `pos` (a zero-length, zero-pointer
// blob binds as NULL per the SQLite C API).
Statement.prototype.bindNull = function bindNull(pos) {
    if (pos == null) pos = this.pos++;
    return sqlite3_bind_blob(this.stmt, pos, 0, 0, 0) === SQLITE_OK;
};
|
|
|
|
// Internal: bind one JS value at parameter `pos`, dispatching on its type.
// strings -> TEXT; numbers and booleans -> INTEGER/REAL; bigints -> TEXT
// (BigInt is not fully supported yet at WASM level); null -> NULL;
// array-likes -> BLOB. Anything else is an API-usage error.
Statement.prototype.bindValue = function bindValue(val, pos) {
    if (pos == null) pos = this.pos++;

    var kind = typeof val;
    if (kind === "string") {
        return this.bindString(val, pos);
    }
    if (kind === "number" || kind === "boolean") {
        // `+ 0` coerces booleans to 0/1 and leaves numbers unchanged.
        return this.bindNumber(val + 0, pos);
    }
    if (kind === "bigint") {
        return this.bindString(val.toString(), pos);
    }
    if (kind === "object") {
        if (val === null) {
            return this.bindNull(pos);
        }
        if (val.length != null) {
            return this.bindBlob(val, pos);
        }
    }
    throw (
        "Wrong API use : tried to bind a value of an unknown type ("
        + val + ")."
    );
};
|
|
|
|
/** Bind names and values of an object to the named parameters of the
|
|
statement
|
|
@param {Object<string, Database.SqlValue>} valuesObj
|
|
@private
|
|
@nodoc
|
|
*/
|
|
// Internal: bind the entries of `valuesObj` to the statement's named
// parameters. Names with no matching parameter (index 0) are skipped.
Statement.prototype.bindFromObject = function bindFromObject(valuesObj) {
    var self = this;
    Object.keys(valuesObj).forEach(function each(name) {
        var index = sqlite3_bind_parameter_index(self.stmt, name);
        if (index !== 0) {
            self.bindValue(valuesObj[name], index);
        }
    });
    return true;
};
|
|
|
|
/** Bind values to numbered parameters
|
|
@param {Array<Database.SqlValue>} values
|
|
@private
|
|
@nodoc
|
|
*/
|
|
// Internal: bind `values` positionally; SQLite parameter indices start at 1.
Statement.prototype.bindFromArray = function bindFromArray(values) {
    var i = 0;
    while (i < values.length) {
        this.bindValue(values[i], i + 1);
        i += 1;
    }
    return true;
};
|
|
|
|
/** Reset a statement, so that its parameters can be bound to new values
|
|
It also clears all previous bindings, freeing the memory used
|
|
by bound parameters.
|
|
*/
|
|
// Reset the statement so its parameters can be bound to new values: free the
// heap memory used by previous bindings, clear them, and rewind execution.
// Short-circuits (no sqlite3_reset call) if clearing the bindings fails,
// mirroring the original `&&` evaluation order.
Statement.prototype["reset"] = function reset() {
    this["freemem"]();
    if (sqlite3_clear_bindings(this.stmt) !== SQLITE_OK) {
        return false;
    }
    return sqlite3_reset(this.stmt) === SQLITE_OK;
};
|
|
|
|
/** Free the memory allocated during parameter binding */
|
|
// Free every heap pointer allocated during parameter binding.
Statement.prototype["freemem"] = function freemem() {
    while (this.allocatedmem.length > 0) {
        _free(this.allocatedmem.pop());
    }
};
|
|
|
|
/** Free the memory used by the statement
|
|
@return {boolean} true in case of success
|
|
*/
|
|
// Release all resources held by the statement: bound-parameter memory, the
// underlying sqlite3_stmt, and the database's bookkeeping entry. Returns
// true if finalization succeeded; the statement is unusable afterwards.
Statement.prototype["free"] = function free() {
    this["freemem"]();
    var ok = sqlite3_finalize(this.stmt) === SQLITE_OK;
    delete this.db.statements[this.stmt];
    this.stmt = NULL;
    return ok;
};
|
|
|
|
/**
|
|
* @classdesc
|
|
* An iterator over multiple SQL statements in a string,
|
|
* preparing and returning a Statement object for the next SQL
|
|
* statement on each iteration.
|
|
*
|
|
* You can't instantiate this class directly, you have to use a
|
|
* {@link Database} object in order to create a statement iterator
|
|
*
|
|
* {@see Database#iterateStatements}
|
|
*
|
|
* @example
|
|
* // loop over and execute statements in string sql
|
|
* for (let statement of db.iterateStatements(sql) {
|
|
* statement.step();
|
|
* // get results, etc.
|
|
* // do not call statement.free() manually, each statement is freed
|
|
* // before the next one is parsed
|
|
* }
|
|
*
|
|
* // capture any bad query exceptions with feedback
|
|
* // on the bad sql
|
|
* let it = db.iterateStatements(sql);
|
|
* try {
|
|
* for (let statement of it) {
|
|
* statement.step();
|
|
* }
|
|
* } catch(e) {
|
|
* console.log(
|
|
* `The SQL string "${it.getRemainingSQL()}" ` +
|
|
* `contains the following error: ${e}`
|
|
* );
|
|
* }
|
|
*
|
|
* @implements {Iterator<Statement>}
|
|
* @implements {Iterable<Statement>}
|
|
* @constructs StatementIterator
|
|
* @memberof module:SqlJs
|
|
* @param {string} sql A string containing multiple SQL statements
|
|
* @param {Database} db The database from which this iterator was created
|
|
*/
|
|
// Constructor for the multi-statement iterator; see the JSDoc above.
// Copies `sql` onto the WASM heap and tracks a moving "tail" pointer that
// next() advances one statement at a time.
function StatementIterator(sql, db) {
    this.db = db;
    this.sqlPtr = stringToNewUTF8(sql);
    // stringToNewUTF8 signals allocation failure with a falsy pointer (0),
    // not null, so the previous `=== null` comparison could never match and
    // the intended error was unreachable. Test truthiness instead.
    if (!this.sqlPtr) {
        // Normalize to null so next()/finalize() see the exhausted state.
        this.sqlPtr = null;
        throw new Error("Unable to allocate memory for the SQL string");
    }
    this.nextSqlPtr = this.sqlPtr;
    // Populated only when next() throws, for getRemainingSQL().
    this.nextSqlString = null;
    // The most recently prepared Statement; freed before the next one.
    this.activeStatement = null;
}
|
|
|
|
/**
|
|
* @typedef {{ done:true, value:undefined } |
|
|
* { done:false, value:Statement}}
|
|
* StatementIterator.StatementIteratorResult
|
|
* @property {Statement} value the next available Statement
|
|
* (as returned by {@link Database.prepare})
|
|
* @property {boolean} done true if there are no more available statements
|
|
*/
|
|
|
|
/** Prepare the next available SQL statement
|
|
@return {StatementIterator.StatementIteratorResult}
|
|
@throws {String} SQLite error or invalid iterator error
|
|
*/
|
|
/** Prepare the next available SQL statement
 @return {StatementIterator.StatementIteratorResult}
 @throws {String} SQLite error or invalid iterator error
 */
StatementIterator.prototype["next"] = function next() {
    // sqlPtr is nulled by finalize(); once exhausted, stay exhausted.
    if (this.sqlPtr === null) {
        return { done: true };
    }
    // Each yielded Statement is freed before the next one is prepared — the
    // documented contract is that callers must NOT free statements manually.
    if (this.activeStatement !== null) {
        this.activeStatement["free"]();
        this.activeStatement = null;
    }
    if (!this.db.db) {
        this.finalize();
        throw new Error("Database closed");
    }
    // Out-parameters for sqlite3_prepare_v2: apiTemp receives the prepared
    // statement pointer, pzTail the pointer just past the parsed statement.
    var stack = stackSave();
    var pzTail = stackAlloc(4);
    setValue(apiTemp, 0, "i32");
    setValue(pzTail, 0, "i32");
    try {
        this.db.handleError(sqlite3_prepare_v2_sqlptr(
            this.db.db,
            this.nextSqlPtr,
            -1,
            apiTemp,
            pzTail
        ));
        // Advance past the statement we just consumed.
        this.nextSqlPtr = getValue(pzTail, "i32");
        var pStmt = getValue(apiTemp, "i32");
        // A NULL statement means only whitespace/comments remained.
        if (pStmt === NULL) {
            this.finalize();
            return { done: true };
        }
        this.activeStatement = new Statement(pStmt, this.db);
        this.db.statements[pStmt] = this.activeStatement;
        return { value: this.activeStatement, done: false };
    } catch (e) {
        // Snapshot the unparsed tail BEFORE freeing the heap copy, so
        // getRemainingSQL() can still report it after the failure.
        this.nextSqlString = UTF8ToString(this.nextSqlPtr);
        this.finalize();
        throw e;
    } finally {
        stackRestore(stack);
    }
};
|
|
|
|
// Internal: release the heap copy of the SQL string and mark the iterator
// exhausted (next() returns {done: true} from now on).
StatementIterator.prototype.finalize = function finalize() {
    _free(this.sqlPtr);
    this.sqlPtr = null;
};
|
|
|
|
/** Get any un-executed portions remaining of the original SQL string
|
|
@return {String}
|
|
*/
|
|
/** Get any un-executed portions remaining of the original SQL string
 @return {String}
 */
StatementIterator.prototype["getRemainingSQL"] = function getRemainder() {
    // nextSqlString is only populated when next() hit an exception;
    // otherwise decode the remainder from the current tail pointer.
    return this.nextSqlString !== null
        ? this.nextSqlString
        : UTF8ToString(this.nextSqlPtr);
};
|
|
|
|
/* implement Iterable interface */

// Only attach the iterator protocol when native symbols are available
// (older environments may lack Symbol entirely).
if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
    StatementIterator.prototype[Symbol.iterator] = function iterator() {
        // The iterator is its own iterable, as for..of expects.
        return this;
    };
}
|
|
|
|
/** @classdesc
 * Represents an SQLite database
 * @constructs Database
 * @memberof module:SqlJs
 * Open a new database either by creating a new one or opening an existing
 * one stored in the byte array passed in first argument
 * @param {Array<number>} data An array of bytes representing
 * an SQLite database file
 */
function Database(data) {
    // Random (not guaranteed unique) name for the backing file inside
    // Emscripten's in-memory filesystem.
    this.filename = "dbfile_" + (0xffffffff * Math.random() >>> 0);
    if (data != null) {
        FS.createDataFile("/", this.filename, data, true, true);
    }
    this.handleError(sqlite3_open(this.filename, apiTemp));
    // C pointer to the sqlite3* handle; set to null by close().
    this.db = getValue(apiTemp, "i32");
    registerExtensionFunctions(this.db);
    // A list of all prepared statements of the database, keyed by the
    // statement's C pointer (freed by close()/export())
    this.statements = {};
    // A list of all user function of the database
    // (created by create_function call)
    this.functions = {};
}
|
|
|
|
/** Execute an SQL query, ignoring the rows it returns.
|
|
@param {string} sql a string containing some SQL text to execute
|
|
@param {Statement.BindParams} [params] When the SQL statement contains
|
|
placeholders, you can pass them in here. They will be bound to the statement
|
|
before it is executed. If you use the params argument, you **cannot**
|
|
provide an sql string that contains several statements (separated by `;`)
|
|
|
|
@example
|
|
// Insert values in a table
|
|
db.run(
|
|
"INSERT INTO test VALUES (:age, :name)",
|
|
{ ':age' : 18, ':name' : 'John' }
|
|
);
|
|
|
|
@return {Database} The database object (useful for method chaining)
|
|
*/
|
|
Database.prototype["run"] = function run(sql, params) {
    if (!this.db) {
        throw "Database closed";
    }
    if (!params) {
        // No placeholders to bind: let sqlite3_exec run the whole
        // string (which may contain several statements) directly.
        this.handleError(sqlite3_exec(this.db, sql, 0, 0, apiTemp));
        return this;
    }
    // With bind parameters we must go through a prepared statement;
    // only a single SQL statement is supported in that case.
    var statement = this["prepare"](sql, params);
    try {
        statement["step"]();
    } finally {
        statement["free"]();
    }
    return this;
};
|
|
|
|
/**
|
|
* @typedef {{
|
|
columns:Array<string>,
|
|
values:Array<Array<Database.SqlValue>>
|
|
}} Database.QueryExecResult
|
|
* @property {Array<string>} columns the name of the columns of the result
|
|
* (as returned by {@link Statement.getColumnNames})
|
|
* @property {
|
|
* Array<Array<Database.SqlValue>>
|
|
* } values one array per row, containing
|
|
* the column values
|
|
*/
|
|
|
|
/** Execute an SQL query, and returns the result.
|
|
*
|
|
* This is a wrapper against
|
|
* {@link Database.prepare},
|
|
* {@link Statement.bind},
|
|
* {@link Statement.step},
|
|
* {@link Statement.get},
|
|
* and {@link Statement.free}.
|
|
*
|
|
* The result is an array of result elements. There are as many result
|
|
* elements as the number of statements in your sql string (statements are
|
|
* separated by a semicolon)
|
|
*
|
|
* ## Example use
|
|
* We will create the following table, named *test* and query it with a
|
|
* multi-line statement using params:
|
|
*
|
|
* | id | age | name |
|
|
* |:--:|:---:|:------:|
|
|
* | 1 | 1 | Ling |
|
|
* | 2 | 18 | Paul |
|
|
*
|
|
* We query it like that:
|
|
* ```javascript
|
|
* var db = new SQL.Database();
|
|
* var res = db.exec(
|
|
* "DROP TABLE IF EXISTS test;\n"
|
|
* + "CREATE TABLE test (id INTEGER, age INTEGER, name TEXT);"
|
|
* + "INSERT INTO test VALUES ($id1, :age1, @name1);"
|
|
* + "INSERT INTO test VALUES ($id2, :age2, @name2);"
|
|
* + "SELECT id FROM test;"
|
|
* + "SELECT age,name FROM test WHERE id=$id1",
|
|
* {
|
|
* "$id1": 1, ":age1": 1, "@name1": "Ling",
|
|
* "$id2": 2, ":age2": 18, "@name2": "Paul"
|
|
* }
|
|
* );
|
|
* ```
|
|
*
|
|
* `res` is now :
|
|
* ```javascript
|
|
* [
|
|
* {"columns":["id"],"values":[[1],[2]]},
|
|
* {"columns":["age","name"],"values":[[1,"Ling"]]}
|
|
* ]
|
|
* ```
|
|
*
|
|
@param {string} sql a string containing some SQL text to execute
|
|
@param {Statement.BindParams} [params] When the SQL statement contains
|
|
placeholders, you can pass them in here. They will be bound to the statement
|
|
before it is executed. If you use the params argument as an array,
|
|
you **cannot** provide an sql string that contains several statements
|
|
(separated by `;`). This limitation does not apply to params as an object.
|
|
* @return {Array<Database.QueryExecResult>} The results of each statement
|
|
*/
|
|
Database.prototype["exec"] = function exec(sql, params, config) {
    if (!this.db) {
        throw "Database closed";
    }
    var stmt = null;
    var originalSqlPtr = null;
    var currentSqlPtr = null;
    // Save the wasm stack so the stackAlloc'ed pzTail slot below is
    // reclaimed on every exit path. The original version never restored
    // the stack, leaking 4 bytes of wasm stack per exec() call
    // (compare StatementIterator.prototype.next, which does save/restore).
    var stack = stackSave();
    try {
        // Heap copy of the SQL text, walked statement by statement.
        originalSqlPtr = stringToNewUTF8(sql);
        currentSqlPtr = originalSqlPtr;
        // Out-parameter receiving a pointer past the compiled statement.
        var pzTail = stackAlloc(4);
        var results = [];
        while (getValue(currentSqlPtr, "i8") !== NULL) {
            setValue(apiTemp, 0, "i32");
            setValue(pzTail, 0, "i32");
            this.handleError(sqlite3_prepare_v2_sqlptr(
                this.db,
                currentSqlPtr,
                -1,
                apiTemp,
                pzTail
            ));
            // pointer to a statement, or null
            var pStmt = getValue(apiTemp, "i32");
            currentSqlPtr = getValue(pzTail, "i32");
            // Empty statement (whitespace/comments only) yields NULL
            if (pStmt !== NULL) {
                var curresult = null;
                stmt = new Statement(pStmt, this);
                if (params != null) {
                    stmt.bind(params);
                }
                while (stmt["step"]()) {
                    if (curresult === null) {
                        curresult = {
                            columns: stmt["getColumnNames"](),
                            values: [],
                        };
                        results.push(curresult);
                    }
                    curresult["values"].push(stmt["get"](null, config));
                }
                stmt["free"]();
                // Forget the freed statement so the catch block cannot
                // free it a second time if a later iteration throws.
                stmt = null;
            }
        }
        return results;
    } catch (errCaught) {
        if (stmt) stmt["free"]();
        throw errCaught;
    } finally {
        if (originalSqlPtr) _free(originalSqlPtr);
        stackRestore(stack);
    }
};
|
|
|
|
/** Execute an sql statement, and call a callback for each row of result.
|
|
|
|
Currently this method is synchronous, it will not return until the callback
|
|
has been called on every row of the result. But this might change.
|
|
|
|
@param {string} sql A string of SQL text. Can contain placeholders
|
|
that will be bound to the parameters given as the second argument
|
|
@param {Statement.BindParams=} [params=] Parameters to bind to the query
|
|
@param {function(Object<string, Database.SqlValue>):void} callback
|
|
Function to call on each row of result
|
|
@param {function():void} done A function that will be called when
|
|
all rows have been retrieved
|
|
|
|
@return {Database} The database object. Useful for method chaining
|
|
|
|
@example <caption>Read values from a table</caption>
|
|
db.each("SELECT name,age FROM users WHERE age >= $majority", {$majority:18},
|
|
function (row){console.log(row.name + " is a grown-up.")}
|
|
);
|
|
*/
|
|
// eslint-disable-next-line max-len
Database.prototype["each"] = function each(sql, params, callback, done, config) {
    // Support the shorter call form each(sql, callback, done, config)
    // by shifting the arguments when no bind params were given.
    if (typeof params === "function") {
        done = callback;
        callback = params;
        params = undefined;
    }
    var statement = this["prepare"](sql, params);
    try {
        // Invoke the callback synchronously for every result row.
        while (statement["step"]()) {
            callback(statement["getAsObject"](null, config));
        }
    } finally {
        statement["free"]();
    }
    return (typeof done === "function") ? done() : undefined;
};
|
|
|
|
/** Prepare an SQL statement
|
|
@param {string} sql a string of SQL, that can contain placeholders
|
|
(`?`, `:VVV`, `:AAA`, `@AAA`)
|
|
@param {Statement.BindParams} [params] values to bind to placeholders
|
|
@return {Statement} the resulting statement
|
|
@throws {String} SQLite error
|
|
*/
|
|
Database.prototype["prepare"] = function prepare(sql, params) {
    setValue(apiTemp, 0, "i32");
    this.handleError(sqlite3_prepare_v2(this.db, sql, -1, apiTemp, NULL));
    // apiTemp now holds a pointer to a statement, or NULL
    var pStmt = getValue(apiTemp, "i32");
    if (pStmt === NULL) {
        throw "Nothing to prepare";
    }
    var statement = new Statement(pStmt, this);
    if (params != null) {
        statement.bind(params);
    }
    // Track the statement so close()/export() can free it later.
    this.statements[pStmt] = statement;
    return statement;
};
|
|
|
|
/** Iterate over multiple SQL statements in a SQL string.
|
|
* This function returns an iterator over {@link Statement} objects.
|
|
* You can use a for..of loop to execute the returned statements one by one.
|
|
* @param {string} sql a string of SQL that can contain multiple statements
|
|
* @return {StatementIterator} the resulting statement iterator
|
|
* @example <caption>Get the results of multiple SQL queries</caption>
|
|
* const sql_queries = "SELECT 1 AS x; SELECT '2' as y";
|
|
* for (const statement of db.iterateStatements(sql_queries)) {
|
|
* const sql = statement.getSQL(); // Get the SQL source
|
|
* const result = statement.getAsObject({}); // Get the row of data
|
|
* console.log(sql, result);
|
|
* }
|
|
* // This will print:
|
|
* // 'SELECT 1 AS x;' { x: 1 }
|
|
* // " SELECT '2' as y" { y: '2' }
|
|
*/
|
|
Database.prototype["iterateStatements"] = function iterateStatements(sql) {
    // Statements are prepared lazily, one at a time, as the iterator
    // is advanced; see StatementIterator.prototype.next.
    return new StatementIterator(sql, this);
};
|
|
|
|
/** Exports the contents of the database to a binary array. This
|
|
* operation will close and re-open the database which will cause
|
|
* any pragmas to be set back to their default values.
|
|
@return {Uint8Array} An array of bytes of the SQLite3 database file
|
|
*/
|
|
Database.prototype["export"] = function exportDatabase() {
    // All prepared statements must be finalized before the underlying
    // sqlite3 handle can be closed.
    Object.values(this.statements).forEach(function each(stmt) {
        stmt["free"]();
    });
    // User-defined functions are tied to the old handle; release their
    // wasm function-table slots too.
    Object.values(this.functions).forEach(removeFunction);
    this.functions = {};
    this.handleError(sqlite3_close_v2(this.db));
    // Snapshot the database file from the in-memory filesystem.
    var binaryDb = FS.readFile(this.filename, { encoding: "binary" });
    // Re-open so this Database object remains usable after exporting
    // (note: pragmas are reset to defaults by the close/re-open cycle).
    this.handleError(sqlite3_open(this.filename, apiTemp));
    this.db = getValue(apiTemp, "i32");
    registerExtensionFunctions(this.db);
    return binaryDb;
};
|
|
|
|
/** Close the database, and all associated prepared statements.
|
|
* The memory associated to the database and all associated statements
|
|
* will be freed.
|
|
*
|
|
* **Warning**: A statement belonging to a database that has been closed
|
|
* cannot be used anymore.
|
|
*
|
|
* Databases **must** be closed when you're finished with them, or the
|
|
* memory consumption will grow forever
|
|
*/
|
|
Database.prototype["close"] = function close() {
    // do nothing if db is null or already closed
    if (this.db === null) {
        return;
    }
    // Finalize every prepared statement before closing the handle.
    Object.values(this.statements).forEach(function each(stmt) {
        stmt["free"]();
    });
    // Release wasm function-table slots held by user-defined functions.
    Object.values(this.functions).forEach(removeFunction);
    this.functions = {};

    // Release the trampoline registered by updateHook(), if any.
    if (this.updateHookFunctionPtr) {
        removeFunction(this.updateHookFunctionPtr);
        this.updateHookFunctionPtr = undefined;
    }

    this.handleError(sqlite3_close_v2(this.db));
    // Remove the backing file from the in-memory filesystem.
    FS.unlink("/" + this.filename);
    this.db = null;
};
|
|
|
|
/** Analyze a result code, return null if no error occured, and throw
|
|
an error with a descriptive message otherwise
|
|
@nodoc
|
|
*/
|
|
Database.prototype["handleError"] = function handleError(returnCode) {
    // Any result code other than SQLITE_OK is turned into an Error
    // carrying SQLite's own description of the failure.
    if (returnCode !== SQLITE_OK) {
        throw new Error(sqlite3_errmsg(this.db));
    }
    return null;
};
|
|
|
|
/** Returns the number of changed rows (modified, inserted or deleted)
|
|
by the latest completed INSERT, UPDATE or DELETE statement on the
|
|
database. Executing any other type of SQL statement does not modify
|
|
the value returned by this function.
|
|
|
|
@return {number} the number of rows modified
|
|
*/
|
|
Database.prototype["getRowsModified"] = function getRowsModified() {
    // Thin wrapper over sqlite3_changes(): counts rows touched by the
    // latest completed INSERT/UPDATE/DELETE on this connection only.
    return sqlite3_changes(this.db);
};
|
|
|
|
// Copy an SQLite blob value out of the wasm heap into a fresh
// Uint8Array owned by JavaScript.
// @param {number} ptr C pointer to an sqlite3_value holding a blob
// @return {Uint8Array} an independent copy of the blob's bytes
var extract_blob = function extract_blob(ptr) {
    var size = sqlite3_value_bytes(ptr);
    var blob_ptr = sqlite3_value_blob(ptr);
    var blob_arg = new Uint8Array(size);
    // Bulk typed-array copy instead of the original byte-by-byte loop;
    // signed heap bytes are converted to unsigned by the set() call,
    // exactly as the per-element assignment did.
    blob_arg.set(HEAP8.subarray(blob_ptr, blob_ptr + size));
    return blob_arg;
};
|
|
|
|
// Convert the C (argc, argv) pair handed to an SQLite user function
// into an array of JavaScript values.
var parseFunctionArguments = function parseFunctionArguments(argc, argv) {
    var args = [];
    for (var i = 0; i < argc; i += 1) {
        // argv is an array of 32-bit pointers to sqlite3_value objects
        var valuePtr = getValue(argv + (4 * i), "i32");
        var valueType = sqlite3_value_type(valuePtr);
        var arg;
        switch (valueType) {
            case SQLITE_INTEGER:
            case SQLITE_FLOAT:
                // Both integers and floats are surfaced as JS numbers
                arg = sqlite3_value_double(valuePtr);
                break;
            case SQLITE_TEXT:
                arg = sqlite3_value_text(valuePtr);
                break;
            case SQLITE_BLOB:
                arg = extract_blob(valuePtr);
                break;
            default:
                // SQLITE_NULL and anything unexpected become null
                arg = null;
        }
        args.push(arg);
    }
    return args;
};
|
|
// Store a JavaScript return value into an sqlite3_context, choosing the
// sqlite3_result_* setter that matches the value's type.
var setFunctionResult = function setFunctionResult(cx, result) {
    var kind = typeof result;
    if (kind === "boolean") {
        sqlite3_result_int(cx, result ? 1 : 0);
    } else if (kind === "number") {
        sqlite3_result_double(cx, result);
    } else if (kind === "string") {
        sqlite3_result_text(cx, result, -1, -1);
    } else if (kind === "object") {
        if (result === null) {
            sqlite3_result_null(cx);
        } else if (result.length != null) {
            // Array-like values are returned as blobs: copy the bytes
            // into a temporary heap buffer that SQLite duplicates.
            var blobptr = _malloc(result.length);
            writeArrayToMemory(result, blobptr);
            sqlite3_result_blob(cx, blobptr, result.length, -1);
            _free(blobptr);
        } else {
            sqlite3_result_error(
                cx, (
                    "Wrong API use : tried to return a value "
                    + "of an unknown type (" + result + ")."
                ), -1
            );
        }
    } else {
        // undefined, function, symbol, ... all map to SQL NULL
        sqlite3_result_null(cx);
    }
};
|
|
|
|
/** Register a custom function with SQLite
|
|
@example <caption>Register a simple function</caption>
|
|
db.create_function("addOne", function (x) {return x+1;})
|
|
db.exec("SELECT addOne(1)") // = 2
|
|
|
|
@param {string} name the name of the function as referenced in
|
|
SQL statements.
|
|
@param {function(any)} func the actual function to be executed.
|
|
@return {Database} The database object. Useful for method chaining
|
|
*/
|
|
Database.prototype["create_function"] = function create_function(
    name,
    func
) {
    // Trampoline matching SQLite's xFunc signature: converts the C
    // arguments to JS values, runs `func`, and stores its result (or
    // reports the thrown error) back into the sqlite3_context.
    function wrapped_func(cx, argc, argv) {
        var args = parseFunctionArguments(argc, argv);
        var result;
        try {
            result = func.apply(null, args);
        } catch (error) {
            sqlite3_result_error(cx, error, -1);
            return;
        }
        setFunctionResult(cx, result);
    }
    // Re-registering under an existing name frees the previous wasm
    // function-table slot first so it is not leaked.
    if (Object.prototype.hasOwnProperty.call(this.functions, name)) {
        removeFunction(this.functions[name]);
        delete this.functions[name];
    }
    // The signature of the wrapped function is :
    // void wrapped(sqlite3_context *db, int argc, sqlite3_value **argv)
    var func_ptr = addFunction(wrapped_func, "viii");
    this.functions[name] = func_ptr;
    // func.length (declared arity of the JS function) becomes the SQL
    // function's argument count.
    this.handleError(sqlite3_create_function_v2(
        this.db,
        name,
        func.length,
        SQLITE_UTF8,
        0,
        func_ptr,
        0,
        0,
        0
    ));
    return this;
};
|
|
|
|
/** Register a custom aggregate with SQLite
|
|
@example <caption>Register a custom sum function</caption>
|
|
db.create_aggregate("js_sum", {
|
|
init: () => 0,
|
|
step: (state, value) => state + value,
|
|
finalize: state => state
|
|
});
|
|
db.exec("SELECT js_sum(column1) FROM (VALUES (1), (2))"); // = 3
|
|
|
|
@param {string} name the name of the aggregate as referenced in
|
|
SQL statements.
|
|
@param {object} aggregateFunctions
|
|
object containing at least a step function.
|
|
@param {function(): T} [aggregateFunctions.init=]
|
|
a function receiving no arguments and returning an initial
|
|
value for the aggregate function. The initial value will be
|
|
null if this key is omitted.
|
|
@param {function(T, any) : T} aggregateFunctions.step
|
|
a function receiving the current state and a value to aggregate
|
|
and returning a new state.
|
|
Will receive the value from init for the first step.
|
|
@param {function(T): any} [aggregateFunctions.finalize=]
|
|
a function returning the result of the aggregate function
|
|
given its final state.
|
|
If omitted, the value returned by the last step
|
|
will be used as the final value.
|
|
@return {Database} The database object. Useful for method chaining
|
|
@template T
|
|
*/
|
|
Database.prototype["create_aggregate"] = function create_aggregate(
    name,
    aggregateFunctions
) {
    // Default initializer and finalizer
    var init = aggregateFunctions["init"]
        || function init() { return null; };
    var finalize = aggregateFunctions["finalize"]
        || function finalize(state) { return state; };
    var step = aggregateFunctions["step"];

    if (!step) {
        throw "An aggregate function must have a step function in " + name;
    }

    // state is a state object; we'll use the pointer p to serve as the
    // key for where we hold our state so that multiple invocations of
    // this function never step on each other
    var state = {};

    // Trampoline for SQLite's xStep callback.
    function wrapped_step(cx, argc, argv) {
        // > The first time the sqlite3_aggregate_context(C,N) routine is
        // > called for a particular aggregate function, SQLite allocates N
        // > bytes of memory, zeroes out that memory, and returns a pointer
        // > to the new memory.
        //
        // We're going to use that pointer as a key to our state array,
        // since using sqlite3_aggregate_context as it's meant to be used
        // through webassembly seems to be very difficult. Just allocate
        // one byte.
        var p = sqlite3_aggregate_context(cx, 1);

        // If this is the first invocation of wrapped_step, call `init`
        //
        // Make sure that every path through the step and finalize
        // functions deletes the value state[p] when it's done so we don't
        // leak memory and possibly stomp the init value of future calls
        if (!Object.hasOwnProperty.call(state, p)) state[p] = init();

        var args = parseFunctionArguments(argc, argv);
        // step receives the accumulated state as its first argument
        var mergedArgs = [state[p]].concat(args);
        try {
            state[p] = step.apply(null, mergedArgs);
        } catch (error) {
            delete state[p];
            sqlite3_result_error(cx, error, -1);
        }
    }

    // Trampoline for SQLite's xFinal callback.
    function wrapped_finalize(cx) {
        var result;
        var p = sqlite3_aggregate_context(cx, 1);
        try {
            result = finalize(state[p]);
        } catch (error) {
            delete state[p];
            sqlite3_result_error(cx, error, -1);
            return;
        }
        setFunctionResult(cx, result);
        delete state[p];
    }

    // Re-registering an aggregate first releases the wasm function
    // pointers from any previous registration under the same name.
    if (Object.hasOwnProperty.call(this.functions, name)) {
        removeFunction(this.functions[name]);
        delete this.functions[name];
    }
    var finalize_name = name + "__finalize";
    if (Object.hasOwnProperty.call(this.functions, finalize_name)) {
        removeFunction(this.functions[finalize_name]);
        delete this.functions[finalize_name];
    }
    // The signature of the wrapped function is :
    // void wrapped(sqlite3_context *db, int argc, sqlite3_value **argv)
    var step_ptr = addFunction(wrapped_step, "viii");

    // The signature of the wrapped function is :
    // void wrapped(sqlite3_context *db)
    var finalize_ptr = addFunction(wrapped_finalize, "vi");
    this.functions[name] = step_ptr;
    this.functions[finalize_name] = finalize_ptr;

    // passing null to the sixth parameter defines this as an aggregate
    // function
    //
    // > An aggregate SQL function requires an implementation of xStep and
    // > xFinal and NULL pointer must be passed for xFunc.
    // - http://www.sqlite.org/c3ref/create_function.html
    // The SQL-visible arity is step's arity minus the state argument.
    this.handleError(sqlite3_create_function_v2(
        this.db,
        name,
        step.length - 1,
        SQLITE_UTF8,
        0,
        0,
        step_ptr,
        finalize_ptr,
        0
    ));
    return this;
};
|
|
|
|
/** Registers an update hook with SQLite.
|
|
*
|
|
* Every time a row is changed by whatever means, the callback is called
|
|
* once with the change (`'insert'`, `'update'` or `'delete'`), the database
|
|
* name and table name where the change happened and the
|
|
* [rowid](https://www.sqlite.org/rowidtable.html)
|
|
* of the row that has been changed.
|
|
*
|
|
* The rowid is cast to a plain number. If it exceeds
|
|
* `Number.MAX_SAFE_INTEGER` (2^53 - 1), an error will be thrown.
|
|
*
|
|
* **Important notes:**
|
|
* - The callback **MUST NOT** modify the database in any way
|
|
* - Only a single callback can be registered at a time
|
|
* - Unregister the callback by passing `null`
|
|
* - Not called for some updates like `ON REPLACE CONFLICT` and `TRUNCATE`
|
|
* (a `DELETE FROM` without a `WHERE` clause)
|
|
*
|
|
* See SQLite documentation on
|
|
* [sqlite3_update_hook](https://www.sqlite.org/c3ref/update_hook.html)
|
|
* for more details
|
|
*
|
|
* @example
|
|
* // Create a database and table
|
|
* var db = new SQL.Database();
|
|
* db.exec(`
|
|
* CREATE TABLE users (
|
|
* id INTEGER PRIMARY KEY, -- this is the rowid column
|
|
* name TEXT,
|
|
* active INTEGER
|
|
* )
|
|
* `);
|
|
*
|
|
* // Register an update hook
|
|
* var changes = [];
|
|
* db.updateHook(function(operation, database, table, rowId) {
|
|
* changes.push({operation, database, table, rowId});
|
|
* console.log(`${operation} on ${database}.${table} row ${rowId}`);
|
|
* });
|
|
*
|
|
* // Insert a row - triggers the update hook with 'insert'
|
|
* db.run("INSERT INTO users VALUES (1, 'Alice', 1)");
|
|
* // Logs: "insert on main.users row 1"
|
|
*
|
|
* // Update a row - triggers the update hook with 'update'
|
|
* db.run("UPDATE users SET active = 0 WHERE id = 1");
|
|
* // Logs: "update on main.users row 1"
|
|
*
|
|
* // Delete a row - triggers the update hook with 'delete'
|
|
* db.run("DELETE FROM users WHERE id = 1");
|
|
* // Logs: "delete on main.users row 1"
|
|
*
|
|
* // Unregister the update hook
|
|
* db.updateHook(null);
|
|
*
|
|
* // This won't trigger any callback
|
|
* db.run("INSERT INTO users VALUES (2, 'Bob', 1)");
|
|
*
|
|
* @param {Database~UpdateHookCallback|null} callback
|
|
* - Callback to be executed when a row changes. Takes the type of change,
|
|
* the name of the database, the name of the table, and the row id of the
|
|
* changed row.
|
|
* - Set to `null` to unregister.
|
|
* @returns {Database} The database object. Useful for method chaining
|
|
*/
|
|
Database.prototype["updateHook"] = function updateHook(callback) {
    if (this.updateHookFunctionPtr) {
        // unregister and cleanup a previously registered update hook
        sqlite3_update_hook(this.db, 0, 0);
        removeFunction(this.updateHookFunctionPtr);
        this.updateHookFunctionPtr = undefined;
    }

    if (!callback) {
        // no new callback to register
        return this;
    }

    // Trampoline matching the C hook signature:
    // void(*)(void *,int ,char const *,char const *,sqlite3_int64)
    function wrappedCallback(
        ignored,
        operationCode,
        databaseNamePtr,
        tableNamePtr,
        rowIdBigInt
    ) {
        var operation;

        // Map SQLite's numeric operation codes to the documented strings.
        switch (operationCode) {
            case SQLITE_INSERT:
                operation = "insert";
                break;
            case SQLITE_UPDATE:
                operation = "update";
                break;
            case SQLITE_DELETE:
                operation = "delete";
                break;
            default:
                throw "unknown operationCode in updateHook callback: "
                    + operationCode;
        }

        var databaseName = UTF8ToString(databaseNamePtr);
        var tableName = UTF8ToString(tableNamePtr);

        // The rowid arrives as a 64-bit value; refuse anything that
        // cannot be represented exactly as a JS Number.
        if (rowIdBigInt > Number.MAX_SAFE_INTEGER) {
            throw "rowId too big to fit inside a Number";
        }

        var rowId = Number(rowIdBigInt);

        callback(operation, databaseName, tableName, rowId);
    }

    // "viiiij": void return, four i32 args, one i64 (the rowid)
    this.updateHookFunctionPtr = addFunction(wrappedCallback, "viiiij");

    sqlite3_update_hook(
        this.db,
        this.updateHookFunctionPtr,
        0 // passed as the first arg to wrappedCallback
    );
    return this;
};
|
|
|
|
/**
|
|
* @callback Database~UpdateHookCallback
|
|
* @param {'insert'|'update'|'delete'} operation
|
|
* - The type of change that occurred
|
|
* @param {string} database
|
|
* - The name of the database where the change occurred
|
|
* @param {string} table
|
|
* - The name of the database's table where the change occurred
|
|
* @param {number} rowId
|
|
* - The [rowid](https://www.sqlite.org/rowidtable.html) of the changed row
|
|
*/
|
|
|
|
// export Database to Module so initSqlJs callers can reach it as
// SQL.Database once the wasm module has finished loading
Module.Database = Database;
|
|
};
|
|
// end include: src/api.js
|
|
|
|
|
|
// Command-line arguments (unused in web builds) and the nominal
// program name reported to the runtime.
var arguments_ = [];
var thisProgram = './this.program';
// Default quit handler: propagate the termination value to the caller.
var quit_ = function (status, toThrow) {
  throw toThrow;
};
|
|
|
|
// In MODULARIZE mode _scriptName needs to be captured already at the very top of the page immediately when the page is parsed, so it is generated there
// before the page load. In non-MODULARIZE modes generate it here.
// (optional chaining: undefined outside a document context)
var _scriptName = globalThis.document?.currentScript?.src;

if (ENVIRONMENT_IS_WORKER) {
  // Workers have no document; use the worker script's own URL instead.
  _scriptName = self.location.href;
}
|
|
|
|
// `/` should be present at the end if `scriptDirectory` is not empty
var scriptDirectory = '';
// Resolve a runtime asset (e.g. the .wasm file) to a fetchable path,
// honouring a user-supplied Module.locateFile override when present.
function locateFile(path) {
  return Module['locateFile']
    ? Module['locateFile'](path, scriptDirectory)
    : scriptDirectory + path;
}
|
|
|
|
// Hooks that are implemented differently in different runtime environments.
var readAsync, readBinary;

if (ENVIRONMENT_IS_SHELL) {

} else

// Note that this includes Node.js workers when relevant (pthreads is enabled).
// Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and
// ENVIRONMENT_IS_NODE.
if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
  try {
    scriptDirectory = new URL('.', _scriptName).href; // includes trailing slash
  } catch {
    // Must be a `blob:` or `data:` URL (e.g. `blob:http://site.com/etc/etc`), we cannot
    // infer anything from them.
  }

  if (!(globalThis.window || globalThis.WorkerGlobalScope)) throw new Error('not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)');

  {
// include: web_or_worker_shell_read.js
  if (ENVIRONMENT_IS_WORKER) {
    // Synchronous XHR is only legal inside workers; used for readBinary.
    readBinary = (url) => {
      var xhr = new XMLHttpRequest();
      xhr.open('GET', url, false);
      xhr.responseType = 'arraybuffer';
      xhr.send(null);
      return new Uint8Array(/** @type{!ArrayBuffer} */(xhr.response));
    };
  }

  // Asynchronous fetch-based read used on the main thread and workers.
  readAsync = async (url) => {
    assert(!isFileURI(url), "readAsync does not work with file:// URLs");
    var response = await fetch(url, { credentials: 'same-origin' });
    if (response.ok) {
      return response.arrayBuffer();
    }
    throw new Error(response.status + ' : ' + response.url);
  };
// end include: web_or_worker_shell_read.js
  }
} else
{
  throw new Error('environment detection error');
}
|
|
|
|
// Default output/error sinks for the runtime.
var out = console.log.bind(console);
var err = console.error.bind(console);

// Placeholder strings: these filesystems are no longer bundled by
// default; builds must pass the matching -l flag to get the real thing.
var IDBFS = 'IDBFS is no longer included by default; build with -lidbfs.js';
var PROXYFS = 'PROXYFS is no longer included by default; build with -lproxyfs.js';
var WORKERFS = 'WORKERFS is no longer included by default; build with -lworkerfs.js';
var FETCHFS = 'FETCHFS is no longer included by default; build with -lfetchfs.js';
var ICASEFS = 'ICASEFS is no longer included by default; build with -licasefs.js';
var JSFILEFS = 'JSFILEFS is no longer included by default; build with -ljsfilefs.js';
var OPFS = 'OPFS is no longer included by default; build with -lopfs.js';

var NODEFS = 'NODEFS is no longer included by default; build with -lnodefs.js';

// perform assertions in shell.js after we set up out() and err(), as otherwise
// if an assertion fails it cannot print the message

assert(!ENVIRONMENT_IS_NODE, 'node environment detected but not enabled at build time.  Add `node` to `-sENVIRONMENT` to enable.');

assert(!ENVIRONMENT_IS_SHELL, 'shell environment detected but not enabled at build time.  Add `shell` to `-sENVIRONMENT` to enable.');
|
|
|
|
// end include: shell.js
|
|
|
|
// include: preamble.js
|
|
// === Preamble library stuff ===
|
|
|
|
// Documentation for the public APIs defined in this file must be updated in:
|
|
// site/source/docs/api_reference/preamble.js.rst
|
|
// A prebuilt local version of the documentation is available at:
|
|
// site/build/text/docs/api_reference/preamble.js.txt
|
|
// You can also build docs locally as HTML or other formats in site/
|
|
// An online HTML version (which may be of a different version of Emscripten)
|
|
// is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html
|
|
|
|
// Optional pre-supplied wasm binary (Module.wasmBinary); undefined means
// the binary will be fetched via locateFile/readAsync.
var wasmBinary;

if (!globalThis.WebAssembly) {
  err('no native wasm support detected');
}

// Wasm globals

//========================================
// Runtime essentials
//========================================

// whether we are quitting the application. no code should run after this.
// set in exit() and abort()
var ABORT = false;

// set by exit() and abort(). Passed to 'onExit' handler.
// NOTE: This is also used as the process return code in shell environments
// but only when noExitRuntime is false.
var EXITSTATUS;
|
|
|
|
// In STRICT mode, we only define assert() when ASSERTIONS is set. i.e. we
// don't define it at all in release modes. This matches the behaviour of
// MINIMAL_RUNTIME.
// TODO(sbc): Make this the default even without STRICT enabled.
/** @type {function(*, string=)} */
function assert(condition, text) {
  if (!condition) {
    // Assertion failures indicate a bug in the runtime or its use;
    // abort() is fatal rather than throwing a catchable error.
    abort('Assertion failed' + (text ? ': ' + text : ''));
  }
}
|
|
|
|
// We used to include malloc/free by default in the past. Show a helpful error in
|
|
// builds with assertions.
|
|
|
|
/**
 * Indicates whether filename is delivered via file protocol (as opposed to http/https)
 * @noinline
 */
var isFileURI = (filename) => filename.indexOf('file://') === 0;
|
|
|
|
// include: runtime_common.js
|
|
// include: runtime_stack_check.js
|
|
// Initializes the stack cookie. Called at the startup of main and at the startup of each thread in pthreads mode.
|
|
// Initializes the stack cookie. Called at the startup of main and at the startup of each thread in pthreads mode.
function writeStackCookie() {
  var max = _emscripten_stack_get_end();
  // The stack end must be 4-byte aligned for the word writes below.
  assert((max & 3) == 0);
  // If the stack ends at address zero we write our cookies 4 bytes into the
  // stack. This prevents interference with SAFE_HEAP and ASAN which also
  // monitor writes to address zero.
  if (max == 0) {
    max += 4;
  }
  // The stack grow downwards towards _emscripten_stack_get_end.
  // We write cookies to the final two words in the stack and detect if they are
  // ever overwritten.
  HEAPU32[((max)>>2)] = 0x02135467;checkInt32(0x02135467);
  HEAPU32[(((max)+(4))>>2)] = 0x89BACDFE;checkInt32(0x89BACDFE);
  // Also test the global address 0 for integrity.
  // (1668509029 == 0x63736d65 == 'emsc'; verified in checkStackCookie)
  HEAPU32[((0)>>2)] = 1668509029;checkInt32(1668509029);
}
|
|
|
|
// Verifies the sentinel words written by writeStackCookie(); a mismatch
// means the wasm stack overflowed and clobbered them.
function checkStackCookie() {
  if (ABORT) return;
  var max = _emscripten_stack_get_end();
  // See writeStackCookie().
  if (max == 0) {
    max += 4;
  }
  var cookie1 = HEAPU32[((max)>>2)];
  var cookie2 = HEAPU32[(((max)+(4))>>2)];
  if (cookie1 != 0x02135467 || cookie2 != 0x89BACDFE) {
    abort(`Stack overflow! Stack cookie has been overwritten at ${ptrToString(max)}, expected hex dwords 0x89BACDFE and 0x2135467, but received ${ptrToString(cookie2)} ${ptrToString(cookie1)}`);
  }
  // Also test the global address 0 for integrity.
  if (HEAPU32[((0)>>2)] != 0x63736d65 /* 'emsc' */) {
    abort('Runtime error: The application has corrupted its heap memory area (address zero)!');
  }
}
|
|
// end include: runtime_stack_check.js
|
|
// include: runtime_exceptions.js
|
|
// end include: runtime_exceptions.js
|
|
// include: runtime_debug.js
|
|
var runtimeDebug = true; // Switch to false at runtime to disable logging at the right times

// Used by XXXXX_DEBUG settings to output debug messages.
function dbg(...args) {
  // Suppressed once `runtimeDebug` has been switched off at runtime.
  if (runtimeDebug || typeof runtimeDebug == 'undefined') {
    // TODO(sbc): Make this configurable somehow. Its not always convenient for
    // logging to show up as warnings.
    console.warn(...args);
  }
}
|
|
|
|
// Endianness check: abort early on big-endian hosts, which these heap views
// do not support.
(() => {
  var probe16 = new Int16Array(1);
  var probe8 = new Int8Array(probe16.buffer);
  probe16[0] = 0x6373;
  var littleEndian = probe8[0] === 0x73 && probe8[1] === 0x63;
  if (!littleEndian) abort('Runtime error: expected the system to be little-endian! (Run with -sSUPPORT_BIG_ENDIAN to bypass)');
})();
|
|
|
|
// Marks a Module property (e.g. 'preRun') as already consumed by the runtime:
// any later assignment to it aborts with an explanatory message.
function consumedModuleProp(prop) {
  if (Object.getOwnPropertyDescriptor(Module, prop)) {
    return; // property already present; leave the embedder's value alone
  }
  Object.defineProperty(Module, prop, {
    configurable: true,
    set() {
      abort(`Attempt to set \`Module.${prop}\` after it has already been processed. This can happen, for example, when code is injected via '--post-js' rather than '--pre-js'`);
    }
  });
}
|
|
|
|
// Builds a stub that traps (via a failing assert) any call made through a
// function reference taken before the Wasm module finished initializing.
function makeInvalidEarlyAccess(name) {
  var trap = () => assert(false, `call to '${name}' via reference taken before Wasm module initialization`);
  return trap;
}
|
|
|
|
// Aborts if the embedder supplied a Module property that this build was
// configured to ignore (not listed in INCOMING_MODULE_JS_API).
function ignoredModuleProp(prop) {
  var supplied = Object.getOwnPropertyDescriptor(Module, prop);
  if (supplied) {
    abort(`\`Module.${prop}\` was supplied but \`${prop}\` not included in INCOMING_MODULE_JS_API`);
  }
}
|
|
|
|
// forcing the filesystem exports a few things by default
// Returns whether `name` is one of the symbols that -sFORCE_FILESYSTEM exports
// automatically (used to enrich missing-symbol diagnostics).
function isExportedByForceFilesystem(name) {
  switch (name) {
    case 'FS_createPath':
    case 'FS_createDataFile':
    case 'FS_createPreloadedFile':
    case 'FS_preloadFile':
    case 'FS_unlink':
    case 'addRunDependency':
    // The old FS has some functionality that WasmFS lacks.
    case 'FS_createLazyFile':
    case 'FS_createDevice':
    case 'removeRunDependency':
      return true;
    default:
      return false;
  }
}
|
|
|
|
/**
 * Intercepts access to a symbol in the global scope. This enables us to give
 * informative warnings/errors when folks attempt to use symbols they did not
 * include in their build, or symbols that no longer exist.
 *
 * We don't define this in MODULARIZE mode since in that mode emscripten
 * symbols are never placed in the global scope.
 */
function hookGlobalSymbolAccess(sym, func) {
  if (Object.getOwnPropertyDescriptor(globalThis, sym)) {
    return; // symbol already exists globally; don't shadow it
  }
  Object.defineProperty(globalThis, sym, {
    configurable: true,
    get() {
      func();
      return undefined;
    }
  });
}
|
|
|
|
// Installs a lazy warning for a global that emscripten used to define but no
// longer does, pointing users at the replacement named in `msg`.
function missingGlobal(sym, msg) {
  var warn = () => warnOnce(`\`${sym}\` is no longer defined by emscripten. ${msg}`);
  hookGlobalSymbolAccess(sym, warn);
}
|
|
|
|
// Legacy globals that moved; touching them now warns with their new home.
missingGlobal('buffer', 'Please use HEAP8.buffer or wasmMemory.buffer');
missingGlobal('asm', 'Please use wasmExports instead');
|
|
|
|
// Installs a diagnostic for a JS-library symbol not linked into this build:
// touching the global warns with the flags needed to include it, and the
// symbol is also marked unexported on the Module object.
function missingLibrarySymbol(sym) {
  hookGlobalSymbolAccess(sym, () => {
    // Can't `abort()` here because it would break code that does runtime
    // checks. e.g. `if (typeof SDL === 'undefined')`.
    var msg = `\`${sym}\` is a library symbol and not included by default; add it to your library.js __deps or to DEFAULT_LIBRARY_FUNCS_TO_INCLUDE on the command line`;
    // DEFAULT_LIBRARY_FUNCS_TO_INCLUDE requires the name as it appears in
    // library.js, which means $name for a JS name with no prefix, or name
    // for a JS name like _name.
    var flagName = sym.startsWith('_') ? sym : '$' + sym;
    msg += ` (e.g. -sDEFAULT_LIBRARY_FUNCS_TO_INCLUDE='${flagName}')`;
    if (isExportedByForceFilesystem(sym)) {
      msg += '. Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you';
    }
    warnOnce(msg);
  });

  // Any symbol that is not included from the JS library is also (by definition)
  // not exported on the Module object.
  unexportedRuntimeSymbol(sym);
}
|
|
|
|
// Defines a trap on Module for a runtime symbol that was not exported:
// reading it aborts with instructions on how to export it.
function unexportedRuntimeSymbol(sym) {
  if (Object.getOwnPropertyDescriptor(Module, sym)) {
    return; // already present on Module; don't shadow it
  }
  Object.defineProperty(Module, sym, {
    configurable: true,
    get() {
      var msg = `'${sym}' was not exported. add it to EXPORTED_RUNTIME_METHODS (see the Emscripten FAQ)`;
      if (isExportedByForceFilesystem(sym)) {
        msg += '. Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you';
      }
      abort(msg);
    },
  });
}
|
|
|
|
// Integer range limits used by the checkInt* heap-write validators below.
var MAX_UINT8 = (2 ** 8) - 1;
var MAX_UINT16 = (2 ** 16) - 1;
var MAX_UINT32 = (2 ** 32) - 1;
var MAX_UINT53 = (2 ** 53) - 1;
// NOTE(review): 2**64 - 1 is above Number.MAX_SAFE_INTEGER, so MAX_UINT64 is
// only approximate as a JS double; adequate for range checks, not exact math.
var MAX_UINT64 = (2 ** 64) - 1;

var MIN_INT8 = - (2 ** ( 8 - 1));
var MIN_INT16 = - (2 ** (16 - 1));
var MIN_INT32 = - (2 ** (32 - 1));
var MIN_INT53 = - (2 ** (53 - 1));
var MIN_INT64 = - (2 ** (64 - 1));
|
|
|
|
// Validates that `value` is an integer within [min, max] before it is written
// into a `bits`-wide integer heap view; failures trip an assert.
// @param {number|bigint} value - candidate heap value
// @param {number} bits - bit width, used only in the error message
// @param {number} min - smallest allowed value (signed lower bound)
// @param {number} max - largest allowed value (unsigned upper bound)
function checkInt(value, bits, min, max) {
  assert(Number.isInteger(Number(value)), `attempt to write non-integer (${value}) into integer heap`);
  assert(value <= max, `value (${value}) too large to write as ${bits}-bit value`);
  assert(value >= min, `value (${value}) too small to write as ${bits}-bit value`);
}

// Per-width wrappers. Each accepts the union of the signed and unsigned range
// for its width (e.g. checkInt8 allows -128..255), matching how generated code
// stores both signed and unsigned values through the same heap views.
// BUGFIX: checkInt1 previously called checkInt(value, 1, 1), which left `max`
// undefined so `value <= undefined` was always false and every checked 1-bit
// write failed the "too large" assert. The correct 1-bit range is [0, 1].
var checkInt1 = (value) => checkInt(value, 1, 0, 1);
var checkInt8 = (value) => checkInt(value, 8, MIN_INT8, MAX_UINT8);
var checkInt16 = (value) => checkInt(value, 16, MIN_INT16, MAX_UINT16);
var checkInt32 = (value) => checkInt(value, 32, MIN_INT32, MAX_UINT32);
var checkInt53 = (value) => checkInt(value, 53, MIN_INT53, MAX_UINT53);
var checkInt64 = (value) => checkInt(value, 64, MIN_INT64, MAX_UINT64);
|
|
|
|
// end include: runtime_debug.js
// Memory management
// Typed-array views over the single wasm linear-memory buffer. They are
// (re)assigned by updateMemoryViews() and become stale after memory growth.
var
/** @type {!Int8Array} */
  HEAP8,
/** @type {!Uint8Array} */
  HEAPU8,
/** @type {!Int16Array} */
  HEAP16,
/** @type {!Uint16Array} */
  HEAPU16,
/** @type {!Int32Array} */
  HEAP32,
/** @type {!Uint32Array} */
  HEAPU32,
/** @type {!Float32Array} */
  HEAPF32,
/** @type {!Float64Array} */
  HEAPF64;

// BigInt64Array type is not correctly defined in closure
var
/** not-@type {!BigInt64Array} */
  HEAP64,
/* BigUint64Array type is not correctly defined in closure
/** not-@type {!BigUint64Array} */
  HEAPU64;

// Set to true by initRuntime(); guards against double initialization.
var runtimeInitialized = false;
|
|
|
|
|
|
|
|
// Rebuilds every HEAP* typed-array view against the current wasm memory
// buffer; must be called whenever the memory is (re)created or grows.
function updateMemoryViews() {
  var buf = wasmMemory.buffer;
  HEAP8 = new Int8Array(buf);
  HEAPU8 = new Uint8Array(buf);
  HEAP16 = new Int16Array(buf);
  HEAPU16 = new Uint16Array(buf);
  HEAP32 = new Int32Array(buf);
  HEAPU32 = new Uint32Array(buf);
  HEAPF32 = new Float32Array(buf);
  HEAPF64 = new Float64Array(buf);
  HEAP64 = new BigInt64Array(buf);
  HEAPU64 = new BigUint64Array(buf);
}
|
|
|
|
// include: memoryprofiler.js
// end include: memoryprofiler.js
// end include: runtime_common.js
// Sanity-check that the host JS engine supports the typed-array API surface
// this runtime relies on.
assert(globalThis.Int32Array && globalThis.Float64Array && Int32Array.prototype.subarray && Int32Array.prototype.set,
  'JS engine does not provide full typed array support');
|
|
|
|
// Drains any user-supplied Module['preRun'] callbacks into the internal
// pre-run queue, then runs the ATPRERUNS hooks.
function preRun() {
  var hooks = Module['preRun'];
  if (hooks) {
    // A single function is treated as a one-element queue.
    if (typeof hooks == 'function') {
      hooks = [hooks];
      Module['preRun'] = hooks;
    }
    while (hooks.length) {
      addOnPreRun(hooks.shift());
    }
  }
  consumedModuleProp('preRun');
  // Begin ATPRERUNS hooks
  callRuntimeCallbacks(onPreRuns);
  // End ATPRERUNS hooks
}
|
|
|
|
// One-time runtime initialization: establishes stack limits, verifies the
// stack cookie, initializes the FS/TTY subsystems, and runs C static
// constructors. The order is significant: stack bookkeeping precedes ctors,
// and FS.init respects an embedder's Module['noFSInit'] opt-out.
function initRuntime() {
  assert(!runtimeInitialized);
  runtimeInitialized = true;

  setStackLimits();

  checkStackCookie();

  // Begin ATINITS hooks
  if (!Module['noFSInit'] && !FS.initialized) FS.init();
  TTY.init();
  // End ATINITS hooks

  // Run C/C++ static constructors inside the wasm module.
  wasmExports['__wasm_call_ctors']();

  // Begin ATPOSTCTORS hooks
  // Permission checks were relaxed during setup; enforce them from now on.
  FS.ignorePermissions = false;
  // End ATPOSTCTORS hooks
}
|
|
|
|
// Drains any user-supplied Module['postRun'] callbacks into the internal
// post-run queue, then runs the ATPOSTRUNS hooks.
function postRun() {
  checkStackCookie();
  // PThreads reuse the runtime from the main thread.

  var hooks = Module['postRun'];
  if (hooks) {
    // A single function is treated as a one-element queue.
    if (typeof hooks == 'function') {
      hooks = [hooks];
      Module['postRun'] = hooks;
    }
    while (hooks.length) {
      addOnPostRun(hooks.shift());
    }
  }
  consumedModuleProp('postRun');

  // Begin ATPOSTRUNS hooks
  callRuntimeCallbacks(onPostRuns);
  // End ATPOSTRUNS hooks
}
|
|
|
|
/** @param {string|number=} what */
function abort(what) {
  // Give the embedder first notice of the abort.
  Module['onAbort']?.(what);

  what = 'Aborted(' + what + ')';
  // TODO(sbc): Should we remove printing and leave it up to whoever
  // catches the exception?
  err(what);

  ABORT = true;

  // Use a wasm runtime error, because a JS error might be seen as a foreign
  // exception, which means we'd run destructors on it. We need the error to
  // simply make the program stop.
  // FIXME This approach does not work in Wasm EH because it currently does not assume
  // all RuntimeErrors are from traps; it decides whether a RuntimeError is from
  // a trap or not based on a hidden field within the object. So at the moment
  // we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that
  // allows this in the wasm spec.

  // Suppress closure compiler warning here. Closure compiler's builtin extern
  // definition for WebAssembly.RuntimeError claims it takes no arguments even
  // though it can.
  // TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed.
  /** @suppress {checkTypes} */
  var trapError = new WebAssembly.RuntimeError(what);

  // Throw the error whether or not MODULARIZE is set because abort is used
  // in code paths apart from instantiation where an exception is expected
  // to be thrown when abort is called.
  throw trapError;
}
|
|
|
|
// Wraps the native export `name` in a function that validates call-time
// preconditions (runtime initialized, export present, at most `nargs` args)
// before forwarding to the real wasm export.
function createExportWrapper(name, nargs) {
  return (...callArgs) => {
    assert(runtimeInitialized, `native function \`${name}\` called before runtime initialization`);
    var target = wasmExports[name];
    assert(target, `exported native function \`${name}\` not found`);
    // Only assert for too many arguments. Too few can be valid since the
    // missing arguments will be zero filled.
    assert(callArgs.length <= nargs, `native function \`${name}\` called with ${callArgs.length} args but expects ${nargs}`);
    return target(...callArgs);
  };
}
|
|
|
|
// Path/URL of the wasm binary; resolved lazily by createWasm() via findWasmBinary().
var wasmBinaryFile;
|
|
|
|
// Computes the URL/path of the wasm binary that accompanies this JS file,
// honouring any embedder-provided locateFile override.
function findWasmBinary() {
  var wasmName = 'sql-wasm-browser-debug.wasm';
  return locateFile(wasmName);
}
|
|
|
|
// Synchronously obtains the wasm bytes: from a preloaded `wasmBinary`, or via
// the environment's readBinary(); throws when neither source is available.
function getBinarySync(file) {
  if (file == wasmBinaryFile && wasmBinary) {
    return new Uint8Array(wasmBinary);
  }
  if (!readBinary) {
    // Throwing a plain string here, even though it not normally advisable since
    // this gets turning into an `abort` in instantiateArrayBuffer.
    throw 'both async and sync fetching of the wasm failed';
  }
  return readBinary(file);
}
|
|
|
|
// Obtains the wasm bytes, preferring an async fetch via readAsync and falling
// back to the synchronous path on failure or when a binary is preloaded.
async function getWasmBinary(binaryFile) {
  // If we don't have the binary yet, load it asynchronously using readAsync.
  if (!wasmBinary) {
    try {
      var bytes = await readAsync(binaryFile);
      return new Uint8Array(bytes);
    } catch {
      // fall through to the synchronous path below
    }
  }

  // Otherwise, getBinarySync should be able to get it synchronously
  return getBinarySync(binaryFile);
}
|
|
|
|
// Instantiates the wasm module from an ArrayBuffer of its bytes; aborts with
// a diagnostic (including a file:// hint) on failure.
async function instantiateArrayBuffer(binaryFile, imports) {
  try {
    var bytes = await getWasmBinary(binaryFile);
    return await WebAssembly.instantiate(bytes, imports);
  } catch (reason) {
    err(`failed to asynchronously prepare wasm: ${reason}`);

    // Warn on some common problems.
    if (isFileURI(binaryFile)) {
      err(`warning: Loading from a file URI (${binaryFile}) is not supported in most browsers. See https://emscripten.org/docs/getting_started/FAQ.html#how-do-i-run-a-local-webserver-for-testing-why-does-my-program-stall-in-downloading-or-preparing`);
    }
    abort(reason);
  }
}
|
|
|
|
// Instantiates the wasm module, trying streaming compilation first (when no
// preloaded binary exists) and falling back to ArrayBuffer instantiation.
async function instantiateAsync(binary, binaryFile, imports) {
  if (!binary) {
    try {
      // instantiateStreaming accepts the Response promise directly.
      var responsePromise = fetch(binaryFile, { credentials: 'same-origin' });
      return await WebAssembly.instantiateStreaming(responsePromise, imports);
    } catch (reason) {
      // We expect the most common failure cause to be a bad MIME type for the binary,
      // in which case falling back to ArrayBuffer instantiation should work.
      err(`wasm streaming compile failed: ${reason}`);
      err('falling back to ArrayBuffer instantiation');
      // fall through to instantiateArrayBuffer below
    }
  }
  return instantiateArrayBuffer(binaryFile, imports);
}
|
|
|
|
// Builds the import object handed to WebAssembly instantiation; both the
// 'env' and 'wasi_snapshot_preview1' namespaces resolve from wasmImports.
function getWasmImports() {
  return {
    'env': wasmImports,
    'wasi_snapshot_preview1': wasmImports,
  };
}
|
|
|
|
// Create the wasm instance.
// Receives the wasm imports, returns the exports.
async function createWasm() {
  // Load the wasm module and create an instance of using native support in the JS engine.
  // handle a generated wasm instance, receiving its exports and
  // performing other necessary setup
  /** @param {WebAssembly.Module=} module*/
  function receiveInstance(instance, module) {
    wasmExports = instance.exports;

    assignWasmExports(wasmExports);

    // The HEAP* views must be (re)built against this instance's memory.
    updateMemoryViews();

    // Clears the startup dependency registered below.
    removeRunDependency('wasm-instantiate');
    return wasmExports;
  }
  // Mark instantiation as an outstanding startup dependency; run() waits on it.
  addRunDependency('wasm-instantiate');

  // Prefer streaming instantiation if available.
  // Async compilation can be confusing when an error on the page overwrites Module
  // (for example, if the order of elements is wrong, and the one defining Module is
  // later), so we save Module and check it later.
  var trueModule = Module;
  function receiveInstantiationResult(result) {
    // 'result' is a ResultObject object which has both the module and instance.
    // receiveInstance() will swap in the exports (to Module.asm) so they can be called
    assert(Module === trueModule, 'the Module object should not be replaced during async compilation - perhaps the order of HTML elements is wrong?');
    trueModule = null;
    // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, the above line no longer optimizes out down to the following line.
    // When the regression is fixed, can restore the above PTHREADS-enabled path.
    return receiveInstance(result['instance']);
  }

  var info = getWasmImports();

  // User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback
  // to manually instantiate the Wasm module themselves. This allows pages to
  // run the instantiation parallel to any other async startup actions they are
  // performing.
  // Also pthreads and wasm workers initialize the wasm instance through this
  // path.
  if (Module['instantiateWasm']) {
    return new Promise((resolve, reject) => {
      try {
        Module['instantiateWasm'](info, (inst, mod) => {
          resolve(receiveInstance(inst, mod));
        });
      } catch(e) {
        err(`Module.instantiateWasm callback failed with error: ${e}`);
        reject(e);
      }
    });
  }

  // Resolve the binary location lazily so embedders can override locateFile.
  wasmBinaryFile ??= findWasmBinary();
  var result = await instantiateAsync(wasmBinary, wasmBinaryFile, info);
  var exports = receiveInstantiationResult(result);
  return exports;
}
|
|
|
|
// end include: preamble.js
|
|
|
|
// Begin JS library code
|
|
|
|
|
|
// Thrown to unwind out of the program when exit(status) is called; carries the
// process exit status. Deliberately not an Error subclass.
class ExitStatus {
  name = 'ExitStatus';
  constructor(status) {
    this.status = status;
    this.message = `Program terminated with exit(${status})`;
  }
}
|
|
|
|
// Invokes and removes every queued callback in FIFO order, passing the Module
// object as the sole argument. Callbacks may enqueue more while this drains.
var callRuntimeCallbacks = (callbacks) => {
  while (callbacks.length > 0) {
    var cb = callbacks.shift();
    // Pass the module as the first argument.
    cb(Module);
  }
};
|
|
// Callback queues drained by postRun()/preRun() via callRuntimeCallbacks().
var onPostRuns = [];
var addOnPostRun = (cb) => onPostRuns.push(cb);

var onPreRuns = [];
var addOnPreRun = (cb) => onPreRuns.push(cb);
|
|
|
|
// Count of outstanding async startup dependencies (e.g. 'wasm-instantiate').
var runDependencies = 0;

// Invoked once runDependencies drops back to zero.
var dependenciesFulfilled = null;

// Maps dependency id -> 1 while outstanding (debug-build bookkeeping).
var runDependencyTracking = {
};

// setInterval handle used to periodically report stuck dependencies.
var runDependencyWatcher = null;
|
|
// Marks startup dependency `id` as satisfied; when the last dependency clears,
// stops the watcher and fires the dependenciesFulfilled continuation.
var removeRunDependency = (id) => {
  runDependencies--;

  Module['monitorRunDependencies']?.(runDependencies);

  assert(id, 'removeRunDependency requires an ID');
  assert(runDependencyTracking[id]);
  delete runDependencyTracking[id];
  if (runDependencies != 0) return;
  if (runDependencyWatcher !== null) {
    clearInterval(runDependencyWatcher);
    runDependencyWatcher = null;
  }
  if (dependenciesFulfilled) {
    var callback = dependenciesFulfilled;
    dependenciesFulfilled = null;
    callback(); // can add another dependenciesFulfilled
  }
};
|
|
|
|
|
|
// Registers startup dependency `id` and, in environments with setInterval,
// starts a watchdog that periodically lists still-pending dependencies.
var addRunDependency = (id) => {
  runDependencies++;

  Module['monitorRunDependencies']?.(runDependencies);

  assert(id, 'addRunDependency requires an ID');
  assert(!runDependencyTracking[id]);
  runDependencyTracking[id] = 1;
  var shouldWatch = runDependencyWatcher === null && globalThis.setInterval;
  if (!shouldWatch) return;
  // Check for missing dependencies every few seconds
  runDependencyWatcher = setInterval(() => {
    if (ABORT) {
      clearInterval(runDependencyWatcher);
      runDependencyWatcher = null;
      return;
    }
    var headerPrinted = false;
    for (var dep in runDependencyTracking) {
      if (!headerPrinted) {
        headerPrinted = true;
        err('still waiting on run dependencies:');
      }
      err(`dependency: ${dep}`);
    }
    if (headerPrinted) {
      err('(end of list)');
    }
  }, 10000);
};
|
|
|
|
|
|
|
|
/**
 * Reads a value of the given LLVM IR type from the heap at `ptr`.
 * @param {number} ptr
 * @param {string} type
 */
function getValue(ptr, type = 'i8') {
  if (type.endsWith('*')) type = '*'; // every pointer flavour reads as u32
  switch (type) {
    case 'i1':
    case 'i8': return HEAP8[ptr];
    case 'i16': return HEAP16[((ptr)>>1)];
    case 'i32': return HEAP32[((ptr)>>2)];
    case 'i64': return HEAP64[((ptr)>>3)];
    case 'float': return HEAPF32[((ptr)>>2)];
    case 'double': return HEAPF64[((ptr)>>3)];
    case '*': return HEAPU32[((ptr)>>2)];
    default: abort(`invalid type for getValue: ${type}`);
  }
}
|
|
|
|
// Keep the runtime alive after main() returns (required for async callers of the library).
var noExitRuntime = true;
|
|
|
|
// Formats a pointer as a zero-padded 8-digit hex string (e.g. '0x0000002a').
var ptrToString = (ptr) => {
  assert(typeof ptr === 'number', `ptrToString expects a number, got ${typeof ptr}`);
  // Convert to 32-bit unsigned value
  ptr >>>= 0;
  var hex = ptr.toString(16);
  return '0x' + hex.padStart(8, '0');
};
|
|
|
|
|
|
// Tells the compiled side the current stack bounds so it can run its own
// stack-overflow checks.
var setStackLimits = () => {
  var base = _emscripten_stack_get_base();
  var end = _emscripten_stack_get_end();
  ___set_stack_limits(base, end);
};
|
|
|
|
|
|
/**
 * Stores `value` into the heap at `ptr` as the given LLVM IR type.
 * @param {number} ptr
 * @param {number} value
 * @param {string} type
 */
function setValue(ptr, value, type = 'i8') {
  if (type.endsWith('*')) type = '*'; // every pointer flavour stores as u32
  switch (type) {
    case 'i1':
    case 'i8': HEAP8[ptr] = value;checkInt8(value); break;
    case 'i16': HEAP16[((ptr)>>1)] = value;checkInt16(value); break;
    case 'i32': HEAP32[((ptr)>>2)] = value;checkInt32(value); break;
    case 'i64': HEAP64[((ptr)>>3)] = BigInt(value);checkInt64(value); break;
    case 'float': HEAPF32[((ptr)>>2)] = value; break;
    case 'double': HEAPF64[((ptr)>>3)] = value; break;
    case '*': HEAPU32[((ptr)>>2)] = value; break;
    default: abort(`invalid type for setValue: ${type}`);
  }
}
|
|
|
|
// Thin wrappers over the wasm stack-pointer intrinsics (save/restore pairs
// bracket temporary stack allocations).
var stackRestore = (val) => __emscripten_stack_restore(val);

var stackSave = () => _emscripten_stack_get_current();
|
|
|
|
// Logs `text` via err() at most once per unique message.
var warnOnce = (text) => {
  warnOnce.shown ||= {};
  if (warnOnce.shown[text]) return;
  warnOnce.shown[text] = 1;
  err(text);
};
|
|
|
|
|
|
|
|
// Shared TextDecoder instance (undefined where TextDecoder is unavailable).
var UTF8Decoder = globalThis.TextDecoder && new TextDecoder();
|
|
|
|
// Returns the exclusive end index of the string starting at `idx`: either the
// position of the first NUL byte, or idx + maxBytesToRead, whichever is first.
// With `ignoreNul` set, the cap is returned unconditionally.
var findStringEnd = (heapOrArray, idx, maxBytesToRead, ignoreNul) => {
  var maxIdx = idx + maxBytesToRead;
  if (ignoreNul) return maxIdx;
  // TextDecoder needs to know the byte length in advance, it doesn't stop on
  // null terminator by itself.
  // The negated comparison lets maxBytesToRead=undefined/NaN behave as
  // Infinity (NaN comparisons are false, so the cap never triggers).
  while (heapOrArray[idx] && !(idx >= maxIdx)) {
    ++idx;
  }
  return idx;
};
|
|
|
|
|
|
/**
 * Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given
 * array that contains uint8 values, returns a copy of that string as a
 * Javascript String object.
 * heapOrArray is either a regular array, or a JavaScript typed array view.
 * @param {number=} idx
 * @param {number=} maxBytesToRead
 * @param {boolean=} ignoreNul - If true, the function will not stop on a NUL character.
 * @return {string}
 */
var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead, ignoreNul) => {

  var endPtr = findStringEnd(heapOrArray, idx, maxBytesToRead, ignoreNul);

  // When using conditional TextDecoder, skip it for short strings as the overhead of the native call is not worth it.
  // (TextDecoder also requires a real typed-array view, hence the .buffer check.)
  if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) {
    return UTF8Decoder.decode(heapOrArray.subarray(idx, endPtr));
  }
  // Manual fallback decoder: walks the bytes, assembling code points from
  // 1- to 4-byte UTF-8 sequences.
  var str = '';
  while (idx < endPtr) {
    // For UTF8 byte structure, see:
    // http://en.wikipedia.org/wiki/UTF-8#Description
    // https://www.ietf.org/rfc/rfc2279.txt
    // https://tools.ietf.org/html/rfc3629
    var u0 = heapOrArray[idx++];
    // 1-byte sequence (ASCII).
    if (!(u0 & 0x80)) { str += String.fromCharCode(u0); continue; }
    var u1 = heapOrArray[idx++] & 63;
    // 2-byte sequence.
    if ((u0 & 0xE0) == 0xC0) { str += String.fromCharCode(((u0 & 31) << 6) | u1); continue; }
    var u2 = heapOrArray[idx++] & 63;
    if ((u0 & 0xF0) == 0xE0) {
      // 3-byte sequence.
      u0 = ((u0 & 15) << 12) | (u1 << 6) | u2;
    } else {
      // 4-byte sequence (warn on malformed leading bytes).
      if ((u0 & 0xF8) != 0xF0) warnOnce('Invalid UTF-8 leading byte ' + ptrToString(u0) + ' encountered when deserializing a UTF-8 string in wasm memory to a JS string!');
      u0 = ((u0 & 7) << 18) | (u1 << 12) | (u2 << 6) | (heapOrArray[idx++] & 63);
    }

    if (u0 < 0x10000) {
      str += String.fromCharCode(u0);
    } else {
      // Code points above the BMP become a UTF-16 surrogate pair.
      var ch = u0 - 0x10000;
      str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF));
    }
  }
  return str;
};
|
|
|
|
/**
 * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the
 * emscripten HEAP, returns a copy of that string as a Javascript String object.
 *
 * @param {number} ptr
 * @param {number=} maxBytesToRead - An optional length that specifies the
 *   maximum number of bytes to read. You can omit this parameter to scan the
 *   string until the first 0 byte. If maxBytesToRead is passed, and the string
 *   at [ptr, ptr+maxBytesToRead) contains a null byte in the middle, then the
 *   string will cut short at that byte index.
 * @param {boolean=} ignoreNul - If true, the function will not stop on a NUL character.
 * @return {string}
 */
var UTF8ToString = (ptr, maxBytesToRead, ignoreNul) => {
  assert(typeof ptr == 'number', `UTF8ToString expects a number (got ${typeof ptr})`);
  if (!ptr) return '';
  return UTF8ArrayToString(HEAPU8, ptr, maxBytesToRead, ignoreNul);
};
|
|
// C `__assert_fail` hook: formats the failing condition and its source
// location, then aborts the runtime.
var ___assert_fail = (condition, filename, line, func) => {
  var location = [filename ? UTF8ToString(filename) : 'unknown filename', line, func ? UTF8ToString(func) : 'unknown function'];
  return abort(`Assertion failed: ${UTF8ToString(condition)}, at: ` + location);
};
|
|
|
|
|
|
|
|
// Called from compiled code when the stack pointer is about to escape its
// bounds; aborts with the requested SP and the configured limits.
var ___handle_stack_overflow = (requested) => {
  var base = _emscripten_stack_get_base();
  var end = _emscripten_stack_get_end();
  var msg = `stack overflow (Attempt to set SP to ${ptrToString(requested)}` +
    `, with stack limits [${ptrToString(end)} - ${ptrToString(base)}` +
    ']). If you require more stack space build with -sSTACK_SIZE=<bytes>';
  abort(msg);
};
|
|
|
|
// POSIX-style path helpers (pure string manipulation; no filesystem access).
var PATH = {
  // True when `path` begins with '/'.
  isAbs:(path) => path.charAt(0) === '/',
  // Splits a filename into [root, dir, basename, ext] via the regex groups.
  splitPath:(filename) => {
    var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
    return splitPathRe.exec(filename).slice(1);
  },
  // Resolves '.' and '..' segments in-place (right to left); `allowAboveRoot`
  // keeps surplus '..' segments instead of dropping them.
  normalizeArray:(parts, allowAboveRoot) => {
    // if the path tries to go above the root, `up` ends up > 0
    var up = 0;
    for (var i = parts.length - 1; i >= 0; i--) {
      var last = parts[i];
      if (last === '.') {
        parts.splice(i, 1);
      } else if (last === '..') {
        parts.splice(i, 1);
        up++;
      } else if (up) {
        // A named segment cancelled by a '..' seen further right.
        parts.splice(i, 1);
        up--;
      }
    }
    // if the path is allowed to go above the root, restore leading ..s
    if (allowAboveRoot) {
      for (; up; up--) {
        parts.unshift('..');
      }
    }
    return parts;
  },
  // Collapses '//', '.', '..'; preserves trailing slash and absoluteness.
  normalize:(path) => {
    var isAbsolute = PATH.isAbs(path),
        trailingSlash = path.slice(-1) === '/';
    // Normalize the path
    path = PATH.normalizeArray(path.split('/').filter((p) => !!p), !isAbsolute).join('/');
    if (!path && !isAbsolute) {
      path = '.';
    }
    if (path && trailingSlash) {
      path += '/';
    }
    return (isAbsolute ? '/' : '') + path;
  },
  // Directory portion of `path` ('.' when there is none).
  dirname:(path) => {
    var result = PATH.splitPath(path),
        root = result[0],
        dir = result[1];
    if (!root && !dir) {
      // No dirname whatsoever
      return '.';
    }
    if (dir) {
      // It has a dirname, strip trailing slash
      dir = dir.slice(0, -1);
    }
    return root + dir;
  },
  // Final path component (trailing slashes ignored).
  basename:(path) => path && path.match(/([^\/]+|\/)\/*$/)[1],
  // Joins segments with '/' and normalizes the result.
  join:(...paths) => PATH.normalize(paths.join('/')),
  // Two-argument fast path of join().
  join2:(l, r) => PATH.normalize(l + '/' + r),
};
|
|
|
|
var initRandomFill = () => {
|
|
|
|
return (view) => crypto.getRandomValues(view);
|
|
};
|
|
var randomFill = (view) => {
|
|
// Lazily init on the first invocation.
|
|
(randomFill = initRandomFill())(view);
|
|
};
|
|
|
|
|
|
|
|
// Path helpers that need filesystem context (FS.cwd) layered on top of PATH.
var PATH_FS = {
  // Resolves `args` right-to-left into an absolute normalized path, falling
  // back to FS.cwd() when no absolute segment is found (Node path.resolve
  // semantics).
  resolve:(...args) => {
    var resolvedPath = '',
      resolvedAbsolute = false;
    for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) {
      var path = (i >= 0) ? args[i] : FS.cwd();
      // Skip empty and invalid entries
      if (typeof path != 'string') {
        throw new TypeError('Arguments to path.resolve must be strings');
      } else if (!path) {
        return ''; // an invalid portion invalidates the whole thing
      }
      resolvedPath = path + '/' + resolvedPath;
      resolvedAbsolute = PATH.isAbs(path);
    }
    // At this point the path should be resolved to a full absolute path, but
    // handle relative paths to be safe (might happen when process.cwd() fails)
    resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter((p) => !!p), !resolvedAbsolute).join('/');
    return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
  },
  // Computes the relative path from `from` to `to` (both resolved first).
  relative:(from, to) => {
    from = PATH_FS.resolve(from).slice(1);
    to = PATH_FS.resolve(to).slice(1);
    // Strips leading/trailing empty segments from a split path.
    function trim(arr) {
      var start = 0;
      for (; start < arr.length; start++) {
        if (arr[start] !== '') break;
      }
      var end = arr.length - 1;
      for (; end >= 0; end--) {
        if (arr[end] !== '') break;
      }
      if (start > end) return [];
      // NOTE(review): slice(start, end - start + 1) looks off compared with
      // Node's historical path.relative (slice(start, end + 1)) — verify
      // against callers before changing.
      return arr.slice(start, end - start + 1);
    }
    var fromParts = trim(from.split('/'));
    var toParts = trim(to.split('/'));
    var length = Math.min(fromParts.length, toParts.length);
    var samePartsLength = length;
    for (var i = 0; i < length; i++) {
      if (fromParts[i] !== toParts[i]) {
        samePartsLength = i;
        break;
      }
    }
    // One '..' per remaining `from` segment, then the remaining `to` segments.
    var outputParts = [];
    for (var i = samePartsLength; i < fromParts.length; i++) {
      outputParts.push('..');
    }
    outputParts = outputParts.concat(toParts.slice(samePartsLength));
    return outputParts.join('/');
  },
};
|
|
|
|
|
|
|
|
// Pending stdin bytes, consumed one at a time by FS_stdin_getChar().
var FS_stdin_getChar_buffer = [];
|
|
|
|
// Returns the number of bytes `str` occupies when encoded as UTF-8,
// EXCLUDING the null terminator.
var lengthBytesUTF8 = (str) => {
  var total = 0;
  for (var i = 0; i < str.length; ++i) {
    // Gotcha: charCodeAt returns a 16-bit UTF-16 code unit, not a Unicode
    // code point. Surrogates (0xD800..0xDFFF) pair up into one 4-byte UTF-8
    // sequence and consume two code units.
    // See http://unicode.org/faq/utf_bom.html#utf16-3
    var cu = str.charCodeAt(i);
    if (cu >= 0xD800 && cu <= 0xDFFF) {
      total += 4;
      ++i;
    } else if (cu <= 0x7F) {
      total += 1;
    } else if (cu <= 0x7FF) {
      total += 2;
    } else {
      total += 3;
    }
  }
  return total;
};
|
|
|
|
// Encodes `str` as UTF-8 into `heap` starting at `outIdx`, writing at most
// `maxBytesToWrite` bytes INCLUDING a NUL terminator, and never splitting a
// multi-byte character across the limit. Returns the number of bytes written,
// excluding the terminator.
var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => {
  assert(typeof str === 'string', `stringToUTF8Array expects a string (got ${typeof str})`);
  // Parameter maxBytesToWrite is not optional. Negative values, 0, null,
  // undefined and false each don't write out any bytes.
  if (!(maxBytesToWrite > 0))
    return 0;

  var startIdx = outIdx;
  var endIdx = outIdx + maxBytesToWrite - 1; // -1 for string null terminator.
  for (var i = 0; i < str.length; ++i) {
    // For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description
    // and https://www.ietf.org/rfc/rfc2279.txt
    // and https://tools.ietf.org/html/rfc3629
    var u = str.codePointAt(i);
    if (u <= 0x7F) {
      // 1-byte sequence (ASCII).
      if (outIdx >= endIdx) break;
      heap[outIdx++] = u;
    } else if (u <= 0x7FF) {
      // 2-byte sequence.
      if (outIdx + 1 >= endIdx) break;
      heap[outIdx++] = 0xC0 | (u >> 6);
      heap[outIdx++] = 0x80 | (u & 63);
    } else if (u <= 0xFFFF) {
      // 3-byte sequence.
      if (outIdx + 2 >= endIdx) break;
      heap[outIdx++] = 0xE0 | (u >> 12);
      heap[outIdx++] = 0x80 | ((u >> 6) & 63);
      heap[outIdx++] = 0x80 | (u & 63);
    } else {
      // 4-byte sequence.
      if (outIdx + 3 >= endIdx) break;
      if (u > 0x10FFFF) warnOnce('Invalid Unicode code point ' + ptrToString(u) + ' encountered when serializing a JS string to a UTF-8 string in wasm memory! (Valid unicode code points should be in range 0-0x10FFFF).');
      heap[outIdx++] = 0xF0 | (u >> 18);
      heap[outIdx++] = 0x80 | ((u >> 12) & 63);
      heap[outIdx++] = 0x80 | ((u >> 6) & 63);
      heap[outIdx++] = 0x80 | (u & 63);
      // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16.
      // We need to manually skip over the second code unit for correct iteration.
      i++;
    }
  }
  // Null-terminate the pointer to the buffer.
  heap[outIdx] = 0;
  return outIdx - startIdx;
};
|
|
/** @type {function(string, boolean=, number=)} */
// Encodes `stringy` into a plain JS array of UTF-8 bytes. By default the
// array is sized for the string plus a NUL terminator; pass `dontAddNull` to
// trim the array to the bytes actually written.
var intArrayFromString = (stringy, dontAddNull, length) => {
  var byteLength = length > 0 ? length : lengthBytesUTF8(stringy) + 1;
  var u8array = new Array(byteLength);
  var written = stringToUTF8Array(stringy, u8array, 0, u8array.length);
  if (dontAddNull) u8array.length = written;
  return u8array;
};
|
|
// Returns the next buffered stdin byte, prompting the user for a fresh line
// (browser only) when the buffer is empty; null signals end-of-input.
var FS_stdin_getChar = () => {
  if (FS_stdin_getChar_buffer.length) {
    return FS_stdin_getChar_buffer.shift();
  }
  var line = null;
  if (globalThis.window?.prompt) {
    // Browser: synchronous prompt; returns null on cancel.
    line = window.prompt('Input: ');
    if (line !== null) {
      line += '\n';
    }
  }
  if (!line) {
    return null;
  }
  FS_stdin_getChar_buffer = intArrayFromString(line, true);
  return FS_stdin_getChar_buffer.shift();
};
|
|
// Terminal (character device) layer: routes tty-style devices onto
// line-buffered out()/err() for output and FS_stdin_getChar() for input.
var TTY = {
  ttys:[],
  init() {
    // https://github.com/emscripten-core/emscripten/pull/1555
    // if (ENVIRONMENT_IS_NODE) {
    //   // currently, FS.init does not distinguish if process.stdin is a file or TTY
    //   // device, it always assumes it's a TTY device. because of this, we're forcing
    //   // process.stdin to UTF8 encoding to at least make stdin reading compatible
    //   // with text files until FS.init can be refactored.
    //   process.stdin.setEncoding('utf8');
    // }
  },
  shutdown() {
    // https://github.com/emscripten-core/emscripten/pull/1555
    // if (ENVIRONMENT_IS_NODE) {
    //   // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)?
    //   // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation
    //   // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists?
    //   // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle
    //   // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call
    //   process.stdin.pause();
    // }
  },
  // Register device number `dev` with per-device ops (get_char/put_char/fsync/ioctl_*).
  register(dev, ops) {
    TTY.ttys[dev] = { input: [], output: [], ops: ops };
    FS.registerDevice(dev, TTY.stream_ops);
  },
  // FS stream operations shared by every tty device.
  stream_ops:{
    open(stream) {
      var tty = TTY.ttys[stream.node.rdev];
      if (!tty) {
        throw new FS.ErrnoError(43); // ENODEV: no tty registered for this rdev
      }
      stream.tty = tty;
      stream.seekable = false; // terminals are not seekable
    },
    close(stream) {
      // flush any pending line data
      stream.tty.ops.fsync(stream.tty);
    },
    fsync(stream) {
      stream.tty.ops.fsync(stream.tty);
    },
    // Read up to `length` bytes into buffer[offset..]; returns bytes read.
    read(stream, buffer, offset, length, pos /* ignored */) {
      if (!stream.tty || !stream.tty.ops.get_char) {
        throw new FS.ErrnoError(60); // ENXIO
      }
      var bytesRead = 0;
      for (var i = 0; i < length; i++) {
        var result;
        try {
          result = stream.tty.ops.get_char(stream.tty);
        } catch (e) {
          throw new FS.ErrnoError(29); // EIO
        }
        // undefined with nothing read yet: no data would be available (EAGAIN).
        if (result === undefined && bytesRead === 0) {
          throw new FS.ErrnoError(6);
        }
        // null/undefined after some bytes: stop and report a short read.
        if (result === null || result === undefined) break;
        bytesRead++;
        buffer[offset+i] = result;
      }
      if (bytesRead) {
        stream.node.atime = Date.now();
      }
      return bytesRead;
    },
    // Write `length` bytes from buffer[offset..] one char at a time.
    write(stream, buffer, offset, length, pos) {
      if (!stream.tty || !stream.tty.ops.put_char) {
        throw new FS.ErrnoError(60); // ENXIO
      }
      try {
        for (var i = 0; i < length; i++) {
          stream.tty.ops.put_char(stream.tty, buffer[offset+i]);
        }
      } catch (e) {
        throw new FS.ErrnoError(29); // EIO
      }
      if (length) {
        stream.node.mtime = stream.node.ctime = Date.now();
      }
      return i;
    },
  },
  // Default ops for the stdout-style tty: buffered lines go to out().
  default_tty_ops:{
    get_char(tty) {
      return FS_stdin_getChar();
    },
    put_char(tty, val) {
      if (val === null || val === 10) {
        // Newline (10) or explicit null: emit the buffered line.
        out(UTF8ArrayToString(tty.output));
        tty.output = [];
      } else {
        if (val != 0) tty.output.push(val); // val == 0 would cut text output off in the middle.
      }
    },
    fsync(tty) {
      // Flush any partially-buffered line.
      if (tty.output?.length > 0) {
        out(UTF8ArrayToString(tty.output));
        tty.output = [];
      }
    },
    ioctl_tcgets(tty) {
      // typical setting
      return {
        c_iflag: 25856,
        c_oflag: 5,
        c_cflag: 191,
        c_lflag: 35387,
        c_cc: [
          0x03, 0x1c, 0x7f, 0x15, 0x04, 0x00, 0x01, 0x00, 0x11, 0x13, 0x1a, 0x00,
          0x12, 0x0f, 0x17, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
          0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        ]
      };
    },
    ioctl_tcsets(tty, optional_actions, data) {
      // currently just ignore
      return 0;
    },
    ioctl_tiocgwinsz(tty) {
      // Fixed 24 rows x 80 columns window size.
      return [24, 80];
    },
  },
  // Ops for the stderr-style tty: identical buffering, lines go to err().
  default_tty1_ops:{
    put_char(tty, val) {
      if (val === null || val === 10) {
        err(UTF8ArrayToString(tty.output));
        tty.output = [];
      } else {
        if (val != 0) tty.output.push(val);
      }
    },
    fsync(tty) {
      if (tty.output?.length > 0) {
        err(UTF8ArrayToString(tty.output));
        tty.output = [];
      }
    },
  },
};
|
|
|
|
|
|
// Fill `size` bytes of the wasm heap, starting at `ptr`, with zeroes.
var zeroMemory = function (ptr, size) {
  return HEAPU8.fill(0, ptr, ptr + size);
};
|
|
|
|
// Round `size` up to the next multiple of `alignment` (alignment must be
// non-zero; asserted).
var alignMemory = (size, alignment) => {
  assert(alignment, "alignment argument is required");
  var chunks = Math.ceil(size / alignment);
  return chunks * alignment;
};
|
|
// Allocate 64KiB-aligned, zero-initialized memory for mmap emulation.
// Returns 0 on allocation failure.
var mmapAlloc = (size) => {
  var alignedSize = alignMemory(size, 65536);
  var ptr = _emscripten_builtin_memalign(65536, alignedSize);
  // memalign'd memory is not zeroed; clear it so mmap semantics hold.
  if (ptr) zeroMemory(ptr, alignedSize);
  return ptr;
};
|
|
// In-memory filesystem backend. File data lives in JS typed arrays that are
// grown geometrically; directories are plain objects mapping child names to
// nodes.
var MEMFS = {
  // Lazily-built node_ops/stream_ops tables, shared by all MEMFS nodes.
  ops_table:null,
  mount(mount) {
    // Mount root is a directory with mode 16895 (S_IFDIR | 0777).
    return MEMFS.createNode(null, '/', 16895, 0);
  },
  // Create a node of the kind encoded in `mode` and attach it to `parent`.
  createNode(parent, name, mode, dev) {
    if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
      // not supported
      throw new FS.ErrnoError(63); // EPERM
    }
    // Build the shared op tables on first use.
    MEMFS.ops_table ||= {
      dir: {
        node: {
          getattr: MEMFS.node_ops.getattr,
          setattr: MEMFS.node_ops.setattr,
          lookup: MEMFS.node_ops.lookup,
          mknod: MEMFS.node_ops.mknod,
          rename: MEMFS.node_ops.rename,
          unlink: MEMFS.node_ops.unlink,
          rmdir: MEMFS.node_ops.rmdir,
          readdir: MEMFS.node_ops.readdir,
          symlink: MEMFS.node_ops.symlink
        },
        stream: {
          llseek: MEMFS.stream_ops.llseek
        }
      },
      file: {
        node: {
          getattr: MEMFS.node_ops.getattr,
          setattr: MEMFS.node_ops.setattr
        },
        stream: {
          llseek: MEMFS.stream_ops.llseek,
          read: MEMFS.stream_ops.read,
          write: MEMFS.stream_ops.write,
          mmap: MEMFS.stream_ops.mmap,
          msync: MEMFS.stream_ops.msync
        }
      },
      link: {
        node: {
          getattr: MEMFS.node_ops.getattr,
          setattr: MEMFS.node_ops.setattr,
          readlink: MEMFS.node_ops.readlink
        },
        stream: {}
      },
      chrdev: {
        node: {
          getattr: MEMFS.node_ops.getattr,
          setattr: MEMFS.node_ops.setattr
        },
        stream: FS.chrdev_stream_ops
      }
    };
    var node = FS.createNode(parent, name, mode, dev);
    if (FS.isDir(node.mode)) {
      node.node_ops = MEMFS.ops_table.dir.node;
      node.stream_ops = MEMFS.ops_table.dir.stream;
      node.contents = {};
    } else if (FS.isFile(node.mode)) {
      node.node_ops = MEMFS.ops_table.file.node;
      node.stream_ops = MEMFS.ops_table.file.stream;
      node.usedBytes = 0; // The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity.
      // When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred
      // for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small disk size
      // penalty involved for appending file writes that continuously grow a file similar to std::vector capacity vs used -scheme.
      node.contents = null;
    } else if (FS.isLink(node.mode)) {
      node.node_ops = MEMFS.ops_table.link.node;
      node.stream_ops = MEMFS.ops_table.link.stream;
    } else if (FS.isChrdev(node.mode)) {
      node.node_ops = MEMFS.ops_table.chrdev.node;
      node.stream_ops = MEMFS.ops_table.chrdev.stream;
    }
    node.atime = node.mtime = node.ctime = Date.now();
    // add the new node to the parent
    if (parent) {
      parent.contents[name] = node;
      parent.atime = parent.mtime = parent.ctime = node.atime;
    }
    return node;
  },
  // Return the file's live bytes as a typed array (no copy when already typed).
  getFileDataAsTypedArray(node) {
    if (!node.contents) return new Uint8Array(0);
    if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); // Make sure to not return excess unused bytes.
    return new Uint8Array(node.contents);
  },
  // Grow a file's backing store so it can hold at least newCapacity bytes.
  expandFileStorage(node, newCapacity) {
    var prevCapacity = node.contents ? node.contents.length : 0;
    if (prevCapacity >= newCapacity) return; // No need to expand, the storage was already large enough.
    // Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity.
    // For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to
    // avoid overshooting the allocation cap by a very large margin.
    var CAPACITY_DOUBLING_MAX = 1024 * 1024;
    newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2.0 : 1.125)) >>> 0);
    if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256); // At minimum allocate 256b for each file when expanding.
    var oldContents = node.contents;
    node.contents = new Uint8Array(newCapacity); // Allocate new storage.
    if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); // Copy old data over to the new storage.
  },
  // Set a file's size exactly, truncating or zero-extending as needed.
  resizeFileStorage(node, newSize) {
    if (node.usedBytes == newSize) return;
    if (newSize == 0) {
      node.contents = null; // Fully decommit when requesting a resize to zero.
      node.usedBytes = 0;
    } else {
      var oldContents = node.contents;
      node.contents = new Uint8Array(newSize); // Allocate new storage.
      if (oldContents) {
        node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes))); // Copy old data over to the new storage.
      }
      node.usedBytes = newSize;
    }
  },
  node_ops:{
    // Build a stat(2)-like attribute object for the node.
    getattr(node) {
      var attr = {};
      // device numbers reuse inode numbers.
      attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
      attr.ino = node.id;
      attr.mode = node.mode;
      attr.nlink = 1;
      attr.uid = 0;
      attr.gid = 0;
      attr.rdev = node.rdev;
      if (FS.isDir(node.mode)) {
        attr.size = 4096;
      } else if (FS.isFile(node.mode)) {
        attr.size = node.usedBytes;
      } else if (FS.isLink(node.mode)) {
        attr.size = node.link.length;
      } else {
        attr.size = 0;
      }
      attr.atime = new Date(node.atime);
      attr.mtime = new Date(node.mtime);
      attr.ctime = new Date(node.ctime);
      // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
      // but this is not required by the standard.
      attr.blksize = 4096;
      attr.blocks = Math.ceil(attr.size / attr.blksize);
      return attr;
    },
    // Apply the attributes present in `attr` (mode/timestamps/size).
    setattr(node, attr) {
      for (const key of ["mode", "atime", "mtime", "ctime"]) {
        if (attr[key] != null) {
          node[key] = attr[key];
        }
      }
      if (attr.size !== undefined) {
        MEMFS.resizeFileStorage(node, attr.size);
      }
    },
    lookup(parent, name) {
      // MEMFS keeps every child in parent.contents, so reaching the VFS
      // lookup means the entry does not exist.
      throw new FS.ErrnoError(44); // ENOENT
    },
    mknod(parent, name, mode, dev) {
      return MEMFS.createNode(parent, name, mode, dev);
    },
    rename(old_node, new_dir, new_name) {
      var new_node;
      try {
        new_node = FS.lookupNode(new_dir, new_name);
      } catch (e) {}
      if (new_node) {
        if (FS.isDir(old_node.mode)) {
          // if we're overwriting a directory at new_name, make sure it's empty.
          for (var i in new_node.contents) {
            throw new FS.ErrnoError(55); // ENOTEMPTY
          }
        }
        FS.hashRemoveNode(new_node);
      }
      // do the internal rewiring
      delete old_node.parent.contents[old_node.name];
      new_dir.contents[new_name] = old_node;
      old_node.name = new_name;
      new_dir.ctime = new_dir.mtime = old_node.parent.ctime = old_node.parent.mtime = Date.now();
    },
    unlink(parent, name) {
      delete parent.contents[name];
      parent.ctime = parent.mtime = Date.now();
    },
    rmdir(parent, name) {
      var node = FS.lookupNode(parent, name);
      // Refuse to remove non-empty directories.
      for (var i in node.contents) {
        throw new FS.ErrnoError(55); // ENOTEMPTY
      }
      delete parent.contents[name];
      parent.ctime = parent.mtime = Date.now();
    },
    readdir(node) {
      return ['.', '..', ...Object.keys(node.contents)];
    },
    symlink(parent, newname, oldpath) {
      // 40960 = S_IFLNK; the link target is stored verbatim in node.link.
      var node = MEMFS.createNode(parent, newname, 0o777 | 40960, 0);
      node.link = oldpath;
      return node;
    },
    readlink(node) {
      if (!FS.isLink(node.mode)) {
        throw new FS.ErrnoError(28); // EINVAL
      }
      return node.link;
    },
  },
  stream_ops:{
    // Copy up to `length` bytes at `position` into buffer[offset..].
    read(stream, buffer, offset, length, position) {
      var contents = stream.node.contents;
      if (position >= stream.node.usedBytes) return 0; // read at/past EOF
      var size = Math.min(stream.node.usedBytes - position, length);
      assert(size >= 0);
      if (size > 8 && contents.subarray) { // non-trivial, and typed array
        buffer.set(contents.subarray(position, position + size), offset);
      } else {
        for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i];
      }
      return size;
    },
    // Write `length` bytes from buffer[offset..] at `position`; returns the
    // number of bytes written. canOwn lets the node adopt the buffer view.
    write(stream, buffer, offset, length, position, canOwn) {
      // The data buffer should be a typed array view
      assert(!(buffer instanceof ArrayBuffer));
      // If the buffer is located in main memory (HEAP), and if
      // memory can grow, we can't hold on to references of the
      // memory buffer, as they may get invalidated. That means we
      // need to copy its contents.
      if (buffer.buffer === HEAP8.buffer) {
        canOwn = false;
      }

      if (!length) return 0;
      var node = stream.node;
      node.mtime = node.ctime = Date.now();

      if (buffer.subarray && (!node.contents || node.contents.subarray)) { // This write is from a typed array to a typed array?
        if (canOwn) {
          assert(position === 0, 'canOwn must imply no weird position inside the file');
          node.contents = buffer.subarray(offset, offset + length);
          node.usedBytes = length;
          return length;
        } else if (node.usedBytes === 0 && position === 0) { // If this is a simple first write to an empty file, do a fast set since we don't need to care about old data.
          node.contents = buffer.slice(offset, offset + length);
          node.usedBytes = length;
          return length;
        } else if (position + length <= node.usedBytes) { // Writing to an already allocated and used subrange of the file?
          node.contents.set(buffer.subarray(offset, offset + length), position);
          return length;
        }
      }

      // Appending to an existing file and we need to reallocate, or source data did not come as a typed array.
      MEMFS.expandFileStorage(node, position+length);
      if (node.contents.subarray && buffer.subarray) {
        // Use typed array write which is available.
        node.contents.set(buffer.subarray(offset, offset + length), position);
      } else {
        for (var i = 0; i < length; i++) {
          node.contents[position + i] = buffer[offset + i]; // Or fall back to manual write if not.
        }
      }
      node.usedBytes = Math.max(node.usedBytes, position + length);
      return length;
    },
    // Compute the new absolute position for whence = SEEK_SET(0)/CUR(1)/END(2).
    llseek(stream, offset, whence) {
      var position = offset;
      if (whence === 1) { // SEEK_CUR
        position += stream.position;
      } else if (whence === 2) { // SEEK_END
        if (FS.isFile(stream.node.mode)) {
          position += stream.node.usedBytes;
        }
      }
      if (position < 0) {
        throw new FS.ErrnoError(28); // EINVAL
      }
      return position;
    },
    // Map `length` bytes of the file at `position`; returns { ptr, allocated }.
    mmap(stream, length, position, prot, flags) {
      if (!FS.isFile(stream.node.mode)) {
        throw new FS.ErrnoError(43); // ENODEV
      }
      var ptr;
      var allocated;
      var contents = stream.node.contents;
      // Only make a new copy when MAP_PRIVATE is specified.
      if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) {
        // We can't emulate MAP_SHARED when the file is not backed by the
        // buffer we're mapping to (e.g. the HEAP buffer).
        allocated = false;
        ptr = contents.byteOffset;
      } else {
        allocated = true;
        ptr = mmapAlloc(length);
        if (!ptr) {
          throw new FS.ErrnoError(48); // ENOMEM
        }
        if (contents) {
          // Try to avoid unnecessary slices.
          if (position > 0 || position + length < contents.length) {
            if (contents.subarray) {
              contents = contents.subarray(position, position + length);
            } else {
              contents = Array.prototype.slice.call(contents, position, position + length);
            }
          }
          HEAP8.set(contents, ptr);
        }
      }
      return { ptr, allocated };
    },
    msync(stream, buffer, offset, length, mmapFlags) {
      // Persist a mapped region back into the file via a plain write.
      MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false);
      // should we check if bytesWritten and length are the same?
      return 0;
    },
  },
};
|
|
|
|
// Translate an fopen()-style mode string ('r', 'w+', 'a', ...) into the
// POSIX open(2) flag bits used by this runtime (O_RDWR=2, O_CREAT=64,
// O_TRUNC=512, O_APPEND=1024). Throws on an unrecognized mode string.
var FS_modeStringToFlags = (str) => {
  const flags = {
    'r': 0,
    'r+': 2,
    'w': 512 | 64 | 1,
    'w+': 512 | 64 | 2,
    'a': 1024 | 64 | 1,
    'a+': 1024 | 64 | 2,
  }[str];
  if (flags === undefined) {
    throw new Error(`Unknown file open mode: ${str}`);
  }
  return flags;
};
|
|
|
|
// Build a chmod-style permission mode from capability flags: readable adds
// the r+x bits (292 | 73) for user/group/other, writable adds the w bits (146).
var FS_getMode = (canRead, canWrite) => {
  return (canRead ? 292 | 73 : 0) | (canWrite ? 146 : 0);
};
|
|
|
|
|
|
|
|
|
|
// Map an errno number to its human-readable strerror() message from the wasm side.
var strError = function (errno) {
  return UTF8ToString(_strerror(errno));
};
|
|
|
|
// Map from symbolic POSIX error names to the musl/Emscripten errno values
// used throughout this runtime. Note: these numbers intentionally differ
// from native Linux errno values. Used by FS.ErrnoError to attach a
// symbolic `code` to thrown errors.
var ERRNO_CODES = {
  'EPERM': 63, 'ENOENT': 44, 'ESRCH': 71, 'EINTR': 27,
  'EIO': 29, 'ENXIO': 60, 'E2BIG': 1, 'ENOEXEC': 45,
  'EBADF': 8, 'ECHILD': 12, 'EAGAIN': 6, 'EWOULDBLOCK': 6,
  'ENOMEM': 48, 'EACCES': 2, 'EFAULT': 21, 'ENOTBLK': 105,
  'EBUSY': 10, 'EEXIST': 20, 'EXDEV': 75, 'ENODEV': 43,
  'ENOTDIR': 54, 'EISDIR': 31, 'EINVAL': 28, 'ENFILE': 41,
  'EMFILE': 33, 'ENOTTY': 59, 'ETXTBSY': 74, 'EFBIG': 22,
  'ENOSPC': 51, 'ESPIPE': 70, 'EROFS': 69, 'EMLINK': 34,
  'EPIPE': 64, 'EDOM': 18, 'ERANGE': 68, 'ENOMSG': 49,
  'EIDRM': 24, 'ECHRNG': 106, 'EL2NSYNC': 156, 'EL3HLT': 107,
  'EL3RST': 108, 'ELNRNG': 109, 'EUNATCH': 110, 'ENOCSI': 111,
  'EL2HLT': 112, 'EDEADLK': 16, 'ENOLCK': 46, 'EBADE': 113,
  'EBADR': 114, 'EXFULL': 115, 'ENOANO': 104, 'EBADRQC': 103,
  'EBADSLT': 102, 'EDEADLOCK': 16, 'EBFONT': 101, 'ENOSTR': 100,
  'ENODATA': 116, 'ETIME': 117, 'ENOSR': 118, 'ENONET': 119,
  'ENOPKG': 120, 'EREMOTE': 121, 'ENOLINK': 47, 'EADV': 122,
  'ESRMNT': 123, 'ECOMM': 124, 'EPROTO': 65, 'EMULTIHOP': 36,
  'EDOTDOT': 125, 'EBADMSG': 9, 'ENOTUNIQ': 126, 'EBADFD': 127,
  'EREMCHG': 128, 'ELIBACC': 129, 'ELIBBAD': 130, 'ELIBSCN': 131,
  'ELIBMAX': 132, 'ELIBEXEC': 133, 'ENOSYS': 52, 'ENOTEMPTY': 55,
  'ENAMETOOLONG': 37, 'ELOOP': 32, 'EOPNOTSUPP': 138, 'EPFNOSUPPORT': 139,
  'ECONNRESET': 15, 'ENOBUFS': 42, 'EAFNOSUPPORT': 5, 'EPROTOTYPE': 67,
  'ENOTSOCK': 57, 'ENOPROTOOPT': 50, 'ESHUTDOWN': 140, 'ECONNREFUSED': 14,
  'EADDRINUSE': 3, 'ECONNABORTED': 13, 'ENETUNREACH': 40, 'ENETDOWN': 38,
  'ETIMEDOUT': 73, 'EHOSTDOWN': 142, 'EHOSTUNREACH': 23, 'EINPROGRESS': 26,
  'EALREADY': 7, 'EDESTADDRREQ': 17, 'EMSGSIZE': 35, 'EPROTONOSUPPORT': 66,
  'ESOCKTNOSUPPORT': 137, 'EADDRNOTAVAIL': 4, 'ENETRESET': 39, 'EISCONN': 30,
  'ENOTCONN': 53, 'ETOOMANYREFS': 141, 'EUSERS': 136, 'EDQUOT': 19,
  'ESTALE': 72, 'ENOTSUP': 138, 'ENOMEDIUM': 148, 'EILSEQ': 25,
  'EOVERFLOW': 61, 'ECANCELED': 11, 'ENOTRECOVERABLE': 56, 'EOWNERDEAD': 62,
  'ESTRPIPE': 135,
};
|
|
|
|
// Fetch `url` asynchronously and resolve with its contents as a Uint8Array.
var asyncLoad = async (url) => {
  var buf = await readAsync(url);
  assert(buf, `Loading data file "${url}" failed (no arrayBuffer).`);
  return new Uint8Array(buf);
};
|
|
|
|
|
|
// Thin forwarding wrapper around FS.createDataFile.
var FS_createDataFile = function (...args) {
  return FS.createDataFile(...args);
};
|
|
|
|
// Return `id`, suffixed with randomness when necessary, so that it does not
// collide with any currently-tracked run dependency.
var getUniqueRunDependency = (id) => {
  var base = id;
  while (runDependencyTracking[id]) {
    id = base + Math.random();
  }
  return id;
};
|
|
|
|
|
|
|
|
// Registered preload plugins (e.g. image/audio decoders) that may transform
// files before they are added to the filesystem.
var preloadPlugins = [];
// Offer `byteArray` to each registered preload plugin; the first plugin whose
// canHandle() matches `fullname` processes it. Resolves with the (possibly
// transformed) data, or the original bytes when no plugin claimed the file.
var FS_handledByPreloadPlugin = async (byteArray, fullname) => {
  // Ensure plugins are ready.
  if (typeof Browser != 'undefined') Browser.init();

  for (var plugin of preloadPlugins) {
    if (plugin['canHandle'](fullname)) {
      assert(plugin['handle'].constructor.name === 'AsyncFunction', 'Filesystem plugin handlers must be async functions (See #24914)')
      return plugin['handle'](byteArray, fullname);
    }
  }
  // If no plugin handled this file then return the original/unmodified
  // byteArray.
  return byteArray;
};
|
|
// Load `url` (a URL string or an existing byte array), run it through any
// preload plugins, then (unless dontCreateFile) create the file in the FS.
// A run dependency is held for the duration so startup waits for the load.
var FS_preloadFile = async (parent, name, url, canRead, canWrite, dontCreateFile, canOwn, preFinish) => {
  // TODO we should allow people to just pass in a complete filename instead
  // of parent and name being that we just join them anyways
  var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent;
  // might have several active requests for the same fullname
  var dep = getUniqueRunDependency(`cp ${fullname}`);
  addRunDependency(dep);
  try {
    var data = (typeof url == 'string') ? await asyncLoad(url) : url;
    data = await FS_handledByPreloadPlugin(data, fullname);
    preFinish?.();
    if (!dontCreateFile) {
      FS_createDataFile(parent, name, data, canRead, canWrite, canOwn);
    }
  } finally {
    // Always release the dependency, even when loading fails.
    removeRunDependency(dep);
  }
};
|
|
// Callback-style facade over FS_preloadFile: `onload` fires on success,
// `onerror` on failure.
var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => {
  var pending = FS_preloadFile(parent, name, url, canRead, canWrite, dontCreateFile, canOwn, preFinish);
  pending.then(onload).catch(onerror);
};
|
|
// Virtual filesystem. Concrete backends (MEMFS, ...) plug in via mount();
// this object holds the shared node tree, fd table and lookup cache.
var FS = {
  root:null,              // root node of the tree (set by the root mount)
  mounts:[],
  devices:{
  },
  streams:[],             // open file descriptions, indexed by fd
  nextInode:1,            // next inode id handed to FSNode
  nameTable:null,         // hash table of nodes, chained via node.name_next
  currentPath:"/",
  initialized:false,
  // When true, nodePermissions() always succeeds (permission checks skipped).
  ignorePermissions:true,
  filesystems:null,
  syncFSRequests:0,       // number of FS.syncfs operations in flight
  readFiles:{
  },
|
|
  // Error type carrying a numeric `errno` plus, when known, the matching
  // symbolic `code` (e.g. 'ENOENT') from ERRNO_CODES.
  ErrnoError:class extends Error {
    name = 'ErrnoError';
    // We set the `name` property to be able to identify `FS.ErrnoError`
    // - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway.
    // - when using PROXYFS, an error can come from an underlying FS
    // as different FS objects have their own FS.ErrnoError each,
    // the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs.
    // we'll use the reliable test `err.name == "ErrnoError"` instead
    constructor(errno) {
      // strError calls into wasm (_strerror); before the runtime is
      // initialized, fall back to an empty message.
      super(runtimeInitialized ? strError(errno) : '');
      this.errno = errno;
      // Attach the first symbolic name that maps to this errno, if any.
      for (var key in ERRNO_CODES) {
        if (ERRNO_CODES[key] === errno) {
          this.code = key;
          break;
        }
      }
    }
  },
|
|
  // An open file description. fd-level state (flags, position) lives in
  // `shared` so that duplicated streams (dup/dup2) observe the same values.
  FSStream:class {
    shared = {};
    get object() {
      return this.node;
    }
    set object(val) {
      this.node = val;
    }
    get isRead() {
      // Any access mode other than O_WRONLY (1) permits reading.
      return (this.flags & 2097155) !== 1;
    }
    get isWrite() {
      // Any access mode other than O_RDONLY (0) permits writing.
      return (this.flags & 2097155) !== 0;
    }
    get isAppend() {
      return (this.flags & 1024); // O_APPEND
    }
    get flags() {
      return this.shared.flags;
    }
    set flags(val) {
      this.shared.flags = val;
    }
    get position() {
      return this.shared.position;
    }
    set position(val) {
      this.shared.position = val;
    }
  },
|
|
  // A node (inode) in the filesystem tree: directory, file, symlink or device.
  FSNode:class {
    node_ops = {};
    stream_ops = {};
    readMode = 292 | 73;   // r (292 = 0o444) plus x (73 = 0o111) bits
    writeMode = 146;       // w bits (0o222)
    mounted = null;        // set to the mount object when used as a mountpoint
    constructor(parent, name, mode, rdev) {
      if (!parent) {
        parent = this; // root node sets parent to itself
      }
      this.parent = parent;
      this.mount = parent.mount;
      this.id = FS.nextInode++;
      this.name = name;
      this.mode = mode;
      this.rdev = rdev;
      this.atime = this.mtime = this.ctime = Date.now();
    }
    get read() {
      return (this.mode & this.readMode) === this.readMode;
    }
    set read(val) {
      val ? this.mode |= this.readMode : this.mode &= ~this.readMode;
    }
    get write() {
      return (this.mode & this.writeMode) === this.writeMode;
    }
    set write(val) {
      val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode;
    }
    get isFolder() {
      return FS.isDir(this.mode);
    }
    get isDevice() {
      return FS.isChrdev(this.mode);
    }
  },
|
|
  // Resolve `path` to { path, node }, following mountpoints and (optionally)
  // symlinks. Options: parent (stop at the parent directory), follow (follow
  // a symlink in the final component), follow_mount (default true),
  // noent_okay (tolerate ENOENT on the final component, returning a node-less
  // result). Throws ELOOP (32) after 40 symlink traversals.
  lookupPath(path, opts = {}) {
    if (!path) {
      throw new FS.ErrnoError(44); // ENOENT
    }
    opts.follow_mount ??= true

    // Resolve relative paths against the current working directory.
    if (!PATH.isAbs(path)) {
      path = FS.cwd() + '/' + path;
    }

    // limit max consecutive symlinks to 40 (SYMLOOP_MAX).
    linkloop: for (var nlinks = 0; nlinks < 40; nlinks++) {
      // split the absolute path
      var parts = path.split('/').filter((p) => !!p);

      // start at the root
      var current = FS.root;
      var current_path = '/';

      for (var i = 0; i < parts.length; i++) {
        var islast = (i === parts.length-1);
        if (islast && opts.parent) {
          // stop resolving
          break;
        }

        // '.' components are no-ops.
        if (parts[i] === '.') {
          continue;
        }

        if (parts[i] === '..') {
          current_path = PATH.dirname(current_path);
          if (FS.isRoot(current)) {
            // '..' at the root: restart resolution with the remainder.
            path = current_path + '/' + parts.slice(i + 1).join('/');
            // We're making progress here, don't let many consecutive ..'s
            // lead to ELOOP
            nlinks--;
            continue linkloop;
          } else {
            current = current.parent;
          }
          continue;
        }

        current_path = PATH.join2(current_path, parts[i]);
        try {
          current = FS.lookupNode(current, parts[i]);
        } catch (e) {
          // if noent_okay is true, suppress a ENOENT in the last component
          // and return an object with an undefined node. This is needed for
          // resolving symlinks in the path when creating a file.
          if ((e?.errno === 44) && islast && opts.noent_okay) {
            return { path: current_path };
          }
          throw e;
        }

        // jump to the mount's root node if this is a mountpoint
        if (FS.isMountpoint(current) && (!islast || opts.follow_mount)) {
          current = current.mounted.root;
        }

        // by default, lookupPath will not follow a symlink if it is the final path component.
        // setting opts.follow = true will override this behavior.
        if (FS.isLink(current.mode) && (!islast || opts.follow)) {
          if (!current.node_ops.readlink) {
            throw new FS.ErrnoError(52); // ENOSYS
          }
          var link = current.node_ops.readlink(current);
          if (!PATH.isAbs(link)) {
            link = PATH.dirname(current_path) + '/' + link;
          }
          // Splice the link target in and restart resolution.
          path = link + '/' + parts.slice(i + 1).join('/');
          continue linkloop;
        }
      }
      return { path: current_path, node: current };
    }
    throw new FS.ErrnoError(32); // ELOOP
  },
|
|
getPath(node) {
|
|
var path;
|
|
while (true) {
|
|
if (FS.isRoot(node)) {
|
|
var mount = node.mount.mountpoint;
|
|
if (!path) return mount;
|
|
return mount[mount.length-1] !== '/' ? `${mount}/${path}` : mount + path;
|
|
}
|
|
path = path ? `${node.name}/${path}` : node.name;
|
|
node = node.parent;
|
|
}
|
|
},
|
|
hashName(parentid, name) {
|
|
var hash = 0;
|
|
|
|
for (var i = 0; i < name.length; i++) {
|
|
hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
|
|
}
|
|
return ((parentid + hash) >>> 0) % FS.nameTable.length;
|
|
},
|
|
hashAddNode(node) {
|
|
var hash = FS.hashName(node.parent.id, node.name);
|
|
node.name_next = FS.nameTable[hash];
|
|
FS.nameTable[hash] = node;
|
|
},
|
|
hashRemoveNode(node) {
|
|
var hash = FS.hashName(node.parent.id, node.name);
|
|
if (FS.nameTable[hash] === node) {
|
|
FS.nameTable[hash] = node.name_next;
|
|
} else {
|
|
var current = FS.nameTable[hash];
|
|
while (current) {
|
|
if (current.name_next === node) {
|
|
current.name_next = node.name_next;
|
|
break;
|
|
}
|
|
current = current.name_next;
|
|
}
|
|
}
|
|
},
|
|
lookupNode(parent, name) {
|
|
var errCode = FS.mayLookup(parent);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
var hash = FS.hashName(parent.id, name);
|
|
for (var node = FS.nameTable[hash]; node; node = node.name_next) {
|
|
var nodeName = node.name;
|
|
if (node.parent.id === parent.id && nodeName === name) {
|
|
return node;
|
|
}
|
|
}
|
|
// if we failed to find it in the cache, call into the VFS
|
|
return FS.lookup(parent, name);
|
|
},
|
|
  // Create a new FSNode and index it so lookupNode() can find it quickly.
  createNode(parent, name, mode, rdev) {
    assert(typeof parent == 'object')
    var node = new FS.FSNode(parent, name, mode, rdev);

    FS.hashAddNode(node);

    return node;
  },
  // Drop the node from the lookup cache.
  destroyNode(node) {
    FS.hashRemoveNode(node);
  },
  isRoot(node) {
    // The root node is its own parent (see the FSNode constructor).
    return node === node.parent;
  },
  isMountpoint(node) {
    return !!node.mounted;
  },
|
|
isFile(mode) {
|
|
return (mode & 61440) === 32768;
|
|
},
|
|
isDir(mode) {
|
|
return (mode & 61440) === 16384;
|
|
},
|
|
isLink(mode) {
|
|
return (mode & 61440) === 40960;
|
|
},
|
|
isChrdev(mode) {
|
|
return (mode & 61440) === 8192;
|
|
},
|
|
isBlkdev(mode) {
|
|
return (mode & 61440) === 24576;
|
|
},
|
|
isFIFO(mode) {
|
|
return (mode & 61440) === 4096;
|
|
},
|
|
isSocket(mode) {
|
|
return (mode & 49152) === 49152;
|
|
},
|
|
flagsToPermissionString(flag) {
|
|
var perms = ['r', 'w', 'rw'][flag & 3];
|
|
if ((flag & 512)) {
|
|
perms += 'w';
|
|
}
|
|
return perms;
|
|
},
|
|
nodePermissions(node, perms) {
|
|
if (FS.ignorePermissions) {
|
|
return 0;
|
|
}
|
|
// return 0 if any user, group or owner bits are set.
|
|
if (perms.includes('r') && !(node.mode & 292)) {
|
|
return 2;
|
|
} else if (perms.includes('w') && !(node.mode & 146)) {
|
|
return 2;
|
|
} else if (perms.includes('x') && !(node.mode & 73)) {
|
|
return 2;
|
|
}
|
|
return 0;
|
|
},
|
|
mayLookup(dir) {
|
|
if (!FS.isDir(dir.mode)) return 54;
|
|
var errCode = FS.nodePermissions(dir, 'x');
|
|
if (errCode) return errCode;
|
|
if (!dir.node_ops.lookup) return 2;
|
|
return 0;
|
|
},
|
|
mayCreate(dir, name) {
|
|
if (!FS.isDir(dir.mode)) {
|
|
return 54;
|
|
}
|
|
try {
|
|
var node = FS.lookupNode(dir, name);
|
|
return 20;
|
|
} catch (e) {
|
|
}
|
|
return FS.nodePermissions(dir, 'wx');
|
|
},
|
|
mayDelete(dir, name, isdir) {
|
|
var node;
|
|
try {
|
|
node = FS.lookupNode(dir, name);
|
|
} catch (e) {
|
|
return e.errno;
|
|
}
|
|
var errCode = FS.nodePermissions(dir, 'wx');
|
|
if (errCode) {
|
|
return errCode;
|
|
}
|
|
if (isdir) {
|
|
if (!FS.isDir(node.mode)) {
|
|
return 54;
|
|
}
|
|
if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
|
|
return 10;
|
|
}
|
|
} else {
|
|
if (FS.isDir(node.mode)) {
|
|
return 31;
|
|
}
|
|
}
|
|
return 0;
|
|
},
|
|
mayOpen(node, flags) {
|
|
if (!node) {
|
|
return 44;
|
|
}
|
|
if (FS.isLink(node.mode)) {
|
|
return 32;
|
|
} else if (FS.isDir(node.mode)) {
|
|
if (FS.flagsToPermissionString(flags) !== 'r' // opening for write
|
|
|| (flags & (512 | 64))) { // TODO: check for O_SEARCH? (== search for dir only)
|
|
return 31;
|
|
}
|
|
}
|
|
return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
|
|
},
|
|
checkOpExists(op, err) {
|
|
if (!op) {
|
|
throw new FS.ErrnoError(err);
|
|
}
|
|
return op;
|
|
},
|
|
MAX_OPEN_FDS:4096,
|
|
nextfd() {
|
|
for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) {
|
|
if (!FS.streams[fd]) {
|
|
return fd;
|
|
}
|
|
}
|
|
throw new FS.ErrnoError(33);
|
|
},
|
|
getStreamChecked(fd) {
|
|
var stream = FS.getStream(fd);
|
|
if (!stream) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
return stream;
|
|
},
|
|
getStream:(fd) => FS.streams[fd],
|
|
createStream(stream, fd = -1) {
|
|
assert(fd >= -1);
|
|
|
|
// clone it, so we can return an instance of FSStream
|
|
stream = Object.assign(new FS.FSStream(), stream);
|
|
if (fd == -1) {
|
|
fd = FS.nextfd();
|
|
}
|
|
stream.fd = fd;
|
|
FS.streams[fd] = stream;
|
|
return stream;
|
|
},
|
|
  // Free a descriptor slot; callers are responsible for stream_ops.close.
  closeStream(fd) {
    FS.streams[fd] = null;
  },
  // Duplicate an open stream onto a (possibly specified) new fd, letting
  // the backing fs observe the duplication via an optional dup hook.
  dupStream(origStream, fd = -1) {
    var stream = FS.createStream(origStream, fd);
    stream.stream_ops?.dup?.(stream);
    return stream;
  },
  // Apply `attr` using the stream-level setattr when available, otherwise
  // the node-level one; throws EPERM (63) when neither op exists.
  doSetAttr(stream, node, attr) {
    var setattr = stream?.stream_ops.setattr;
    // The op chosen above determines which object it is invoked with.
    var arg = setattr ? stream : node;
    setattr ??= node.node_ops.setattr;
    FS.checkOpExists(setattr, 63)
    setattr(arg, attr);
  },
|
|
  // Stream ops installed on character-device nodes: open() swaps in the
  // registered device's own stream ops and forwards the call to them.
  chrdev_stream_ops:{
    open(stream) {
      var device = FS.getDevice(stream.node.rdev);
      // override node's stream ops with the device's
      stream.stream_ops = device.stream_ops;
      // forward the open call
      stream.stream_ops.open?.(stream);
    },
    llseek() {
      throw new FS.ErrnoError(70); // ESPIPE: character devices are not seekable
    },
  },
  // Pack/unpack Linux-style dev_t numbers: major in the high byte,
  // minor in the low byte.
  major:(dev) => ((dev) >> 8),
  minor:(dev) => ((dev) & 0xff),
  makedev:(ma, mi) => ((ma) << 8 | (mi)),
  // Associate a device number with its stream operations.
  registerDevice(dev, ops) {
    FS.devices[dev] = { stream_ops: ops };
  },
  getDevice:(dev) => FS.devices[dev],
|
|
getMounts(mount) {
|
|
var mounts = [];
|
|
var check = [mount];
|
|
|
|
while (check.length) {
|
|
var m = check.pop();
|
|
|
|
mounts.push(m);
|
|
|
|
check.push(...m.mounts);
|
|
}
|
|
|
|
return mounts;
|
|
},
|
|
  // Synchronize every mounted filesystem that supports syncfs.
  // populate=true pulls persisted state in; false pushes local state out.
  // `callback(err)` fires once all mounts finish, or on the first error.
  syncfs(populate, callback) {
    if (typeof populate == 'function') {
      // Allow syncfs(callback): populate defaults to false.
      callback = populate;
      populate = false;
    }

    FS.syncFSRequests++;

    if (FS.syncFSRequests > 1) {
      err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`);
    }

    var mounts = FS.getMounts(FS.root.mount);
    var completed = 0;

    function doCallback(errCode) {
      assert(FS.syncFSRequests > 0);
      FS.syncFSRequests--;
      return callback(errCode);
    }

    // Per-mount completion callback: report only the first error, and fire
    // the user callback once every mount has reported in.
    function done(errCode) {
      if (errCode) {
        if (!done.errored) {
          done.errored = true;
          return doCallback(errCode);
        }
        return;
      }
      if (++completed >= mounts.length) {
        doCallback(null);
      }
    };

    // sync all mounts
    for (var mount of mounts) {
      if (mount.type.syncfs) {
        mount.type.syncfs(mount, populate, done);
      } else {
        // Mounts without a syncfs hook count as immediately complete.
        done(null);
      }
    }
  },
|
|
  // Attach filesystem `type` (e.g. MEMFS) at `mountpoint` with options
  // `opts`; returns the new mount's root node. mountpoint '/' installs the
  // FS root; a falsy mountpoint creates a detached ("pseudo") mount.
  mount(type, opts, mountpoint) {
    if (typeof type == 'string') {
      // The filesystem was not included, and instead we have an error
      // message stored in the variable.
      throw type;
    }
    var root = mountpoint === '/';
    var pseudo = !mountpoint;
    var node;

    if (root && FS.root) {
      throw new FS.ErrnoError(10); // EBUSY: a root is already mounted
    } else if (!root && !pseudo) {
      var lookup = FS.lookupPath(mountpoint, { follow_mount: false });

      mountpoint = lookup.path; // use the absolute path
      node = lookup.node;

      if (FS.isMountpoint(node)) {
        throw new FS.ErrnoError(10); // EBUSY: already a mountpoint
      }

      if (!FS.isDir(node.mode)) {
        throw new FS.ErrnoError(54); // ENOTDIR
      }
    }

    var mount = {
      type,
      opts,
      mountpoint,
      mounts: []
    };

    // create a root node for the fs
    var mountRoot = type.mount(mount);
    mountRoot.mount = mount;
    mount.root = mountRoot;

    if (root) {
      FS.root = mountRoot;
    } else if (node) {
      // set as a mountpoint
      node.mounted = mount;

      // add the new mount to the current mount's children
      if (node.mount) {
        node.mount.mounts.push(mount);
      }
    }

    return mountRoot;
  },
|
|
// Unmount whatever is mounted at `mountpoint`, destroying the cached
// nodes belonging to that mount and to every mount nested beneath it.
unmount(mountpoint) {
  var lookup = FS.lookupPath(mountpoint, { follow_mount: false });

  if (!FS.isMountpoint(lookup.node)) {
    // nothing mounted here (28 = EINVAL)
    throw new FS.ErrnoError(28);
  }

  // destroy the nodes for this mount, and all its child mounts
  var node = lookup.node;
  var mount = node.mounted;
  var mounts = FS.getMounts(mount);

  // walk every chain in the name-lookup hash table, evicting nodes that
  // belong to any of the mounts being removed
  for (var [hash, current] of Object.entries(FS.nameTable)) {
    while (current) {
      var next = current.name_next;

      if (mounts.includes(current.mount)) {
        FS.destroyNode(current);
      }

      current = next;
    }
  }

  // no longer a mountpoint
  node.mounted = null;

  // remove this mount from the child mounts
  var idx = node.mount.mounts.indexOf(mount);
  assert(idx !== -1);
  node.mount.mounts.splice(idx, 1);
},
|
|
lookup(parent, name) {
|
|
return parent.node_ops.lookup(parent, name);
|
|
},
|
|
mknod(path, mode, dev) {
|
|
var lookup = FS.lookupPath(path, { parent: true });
|
|
var parent = lookup.node;
|
|
var name = PATH.basename(path);
|
|
if (!name) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (name === '.' || name === '..') {
|
|
throw new FS.ErrnoError(20);
|
|
}
|
|
var errCode = FS.mayCreate(parent, name);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.mknod) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
return parent.node_ops.mknod(parent, name, mode, dev);
|
|
},
|
|
// statfs by path (always follows symlinks).
statfs(path) {
  return FS.statfsNode(FS.lookupPath(path, {follow: true}).node);
},
// statfs via an open stream.
statfsStream(stream) {
  // We keep a separate statfsStream function because noderawfs overrides
  // it. In noderawfs, stream.node is sometimes null. Instead, we need to
  // look at stream.path.
  return FS.statfsNode(stream.node);
},
|
|
statfsNode(node) {
|
|
// NOTE: None of the defaults here are true. We're just returning safe and
|
|
// sane values. Currently nodefs and rawfs replace these defaults,
|
|
// other file systems leave them alone.
|
|
var rtn = {
|
|
bsize: 4096,
|
|
frsize: 4096,
|
|
blocks: 1e6,
|
|
bfree: 5e5,
|
|
bavail: 5e5,
|
|
files: FS.nextInode,
|
|
ffree: FS.nextInode - 1,
|
|
fsid: 42,
|
|
flags: 2,
|
|
namelen: 255,
|
|
};
|
|
|
|
if (node.node_ops.statfs) {
|
|
Object.assign(rtn, node.node_ops.statfs(node.mount.opts.root));
|
|
}
|
|
return rtn;
|
|
},
|
|
create(path, mode = 0o666) {
|
|
mode &= 4095;
|
|
mode |= 32768;
|
|
return FS.mknod(path, mode, 0);
|
|
},
|
|
mkdir(path, mode = 0o777) {
|
|
mode &= 511 | 512;
|
|
mode |= 16384;
|
|
return FS.mknod(path, mode, 0);
|
|
},
|
|
// mkdir -p: create `path` and any missing ancestor directories,
// tolerating components that already exist.
mkdirTree(path, mode) {
  var dirs = path.split('/');
  var d = '';
  for (var dir of dirs) {
    if (!dir) continue; // skip empty segments from leading/double slashes
    if (d || PATH.isAbs(path)) d += '/';
    d += dir;
    try {
      FS.mkdir(d, mode);
    } catch(e) {
      // 20 = EEXIST: an already-existing ancestor is fine
      if (e.errno != 20) throw e;
    }
  }
},
|
|
mkdev(path, mode, dev) {
|
|
if (typeof dev == 'undefined') {
|
|
dev = mode;
|
|
mode = 0o666;
|
|
}
|
|
mode |= 8192;
|
|
return FS.mknod(path, mode, dev);
|
|
},
|
|
// Create a symlink at `newpath` whose target string is `oldpath`.
symlink(oldpath, newpath) {
  if (!PATH_FS.resolve(oldpath)) {
    // target resolves to nothing (44 = ENOENT)
    throw new FS.ErrnoError(44);
  }
  var lookup = FS.lookupPath(newpath, { parent: true });
  var parent = lookup.node;
  if (!parent) {
    // parent directory does not exist (44 = ENOENT)
    throw new FS.ErrnoError(44);
  }
  var newname = PATH.basename(newpath);
  var errCode = FS.mayCreate(parent, newname);
  if (errCode) {
    throw new FS.ErrnoError(errCode);
  }
  if (!parent.node_ops.symlink) {
    // backend does not support symlinks (63 = EPERM)
    throw new FS.ErrnoError(63);
  }
  return parent.node_ops.symlink(parent, newname, oldpath);
},
|
|
rename(old_path, new_path) {
|
|
var old_dirname = PATH.dirname(old_path);
|
|
var new_dirname = PATH.dirname(new_path);
|
|
var old_name = PATH.basename(old_path);
|
|
var new_name = PATH.basename(new_path);
|
|
// parents must exist
|
|
var lookup, old_dir, new_dir;
|
|
|
|
// let the errors from non existent directories percolate up
|
|
lookup = FS.lookupPath(old_path, { parent: true });
|
|
old_dir = lookup.node;
|
|
lookup = FS.lookupPath(new_path, { parent: true });
|
|
new_dir = lookup.node;
|
|
|
|
if (!old_dir || !new_dir) throw new FS.ErrnoError(44);
|
|
// need to be part of the same mount
|
|
if (old_dir.mount !== new_dir.mount) {
|
|
throw new FS.ErrnoError(75);
|
|
}
|
|
// source must exist
|
|
var old_node = FS.lookupNode(old_dir, old_name);
|
|
// old path should not be an ancestor of the new path
|
|
var relative = PATH_FS.relative(old_path, new_dirname);
|
|
if (relative.charAt(0) !== '.') {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
// new path should not be an ancestor of the old path
|
|
relative = PATH_FS.relative(new_path, old_dirname);
|
|
if (relative.charAt(0) !== '.') {
|
|
throw new FS.ErrnoError(55);
|
|
}
|
|
// see if the new path already exists
|
|
var new_node;
|
|
try {
|
|
new_node = FS.lookupNode(new_dir, new_name);
|
|
} catch (e) {
|
|
// not fatal
|
|
}
|
|
// early out if nothing needs to change
|
|
if (old_node === new_node) {
|
|
return;
|
|
}
|
|
// we'll need to delete the old entry
|
|
var isdir = FS.isDir(old_node.mode);
|
|
var errCode = FS.mayDelete(old_dir, old_name, isdir);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
// need delete permissions if we'll be overwriting.
|
|
// need create permissions if new doesn't already exist.
|
|
errCode = new_node ?
|
|
FS.mayDelete(new_dir, new_name, isdir) :
|
|
FS.mayCreate(new_dir, new_name);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!old_dir.node_ops.rename) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
// if we are going to change the parent, check write permissions
|
|
if (new_dir !== old_dir) {
|
|
errCode = FS.nodePermissions(old_dir, 'w');
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
}
|
|
// remove the node from the lookup hash
|
|
FS.hashRemoveNode(old_node);
|
|
// do the underlying fs rename
|
|
try {
|
|
old_dir.node_ops.rename(old_node, new_dir, new_name);
|
|
// update old node (we do this here to avoid each backend
|
|
// needing to)
|
|
old_node.parent = new_dir;
|
|
} catch (e) {
|
|
throw e;
|
|
} finally {
|
|
// add the node back to the hash (in case node_ops.rename
|
|
// changed its name)
|
|
FS.hashAddNode(old_node);
|
|
}
|
|
},
|
|
// Remove the directory at `path`. Mountpoints cannot be removed.
rmdir(path) {
  var lookup = FS.lookupPath(path, { parent: true });
  var parent = lookup.node;
  var name = PATH.basename(path);
  var node = FS.lookupNode(parent, name);
  // third arg true: deleting a directory
  var errCode = FS.mayDelete(parent, name, true);
  if (errCode) {
    throw new FS.ErrnoError(errCode);
  }
  if (!parent.node_ops.rmdir) {
    throw new FS.ErrnoError(63); // EPERM
  }
  if (FS.isMountpoint(node)) {
    throw new FS.ErrnoError(10); // EBUSY
  }
  parent.node_ops.rmdir(parent, name);
  FS.destroyNode(node);
},
|
|
// List the entries of the directory at `path` (follows symlinks).
readdir(path) {
  var lookup = FS.lookupPath(path, { follow: true });
  var node = lookup.node;
  // 54 = ENOTDIR when the backend provides no readdir op
  var readdir = FS.checkOpExists(node.node_ops.readdir, 54);
  return readdir(node);
},
|
|
// Remove the non-directory entry at `path`.
unlink(path) {
  var lookup = FS.lookupPath(path, { parent: true });
  var parent = lookup.node;
  if (!parent) {
    throw new FS.ErrnoError(44); // ENOENT
  }
  var name = PATH.basename(path);
  var node = FS.lookupNode(parent, name);
  // third arg false: not deleting a directory
  var errCode = FS.mayDelete(parent, name, false);
  if (errCode) {
    // According to POSIX, we should map EISDIR to EPERM, but
    // we instead do what Linux does (and we must, as we use
    // the musl linux libc).
    throw new FS.ErrnoError(errCode);
  }
  if (!parent.node_ops.unlink) {
    throw new FS.ErrnoError(63); // EPERM
  }
  if (FS.isMountpoint(node)) {
    throw new FS.ErrnoError(10); // EBUSY
  }
  parent.node_ops.unlink(parent, name);
  FS.destroyNode(node);
},
|
|
// Return the target string of the symlink at `path`.
readlink(path) {
  var lookup = FS.lookupPath(path);
  var link = lookup.node;
  if (!link) {
    throw new FS.ErrnoError(44); // ENOENT
  }
  if (!link.node_ops.readlink) {
    // not a symlink, or backend lacks readlink (28 = EINVAL)
    throw new FS.ErrnoError(28);
  }
  return link.node_ops.readlink(link);
},
|
|
// stat by path; pass dontFollow=true for lstat semantics.
stat(path, dontFollow) {
  var lookup = FS.lookupPath(path, { follow: !dontFollow });
  var node = lookup.node;
  // 63 = EPERM when the backend provides no getattr op
  var getattr = FS.checkOpExists(node.node_ops.getattr, 63);
  return getattr(node);
},
|
|
fstat(fd) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
var node = stream.node;
|
|
var getattr = stream.stream_ops.getattr;
|
|
var arg = getattr ? stream : node;
|
|
getattr ??= node.node_ops.getattr;
|
|
FS.checkOpExists(getattr, 63)
|
|
return getattr(arg);
|
|
},
|
|
lstat(path) {
|
|
return FS.stat(path, true);
|
|
},
|
|
// Shared chmod implementation: apply a mode change through doSetAttr.
doChmod(stream, node, mode, dontFollow) {
  FS.doSetAttr(stream, node, {
    // keep the file-type bits, replace the 0o7777 permission bits
    mode: (mode & 4095) | (node.mode & ~4095),
    ctime: Date.now(),
    dontFollow
  });
},
// chmod by path string, or directly by node object (internal callers).
chmod(path, mode, dontFollow) {
  var node;
  if (typeof path == 'string') {
    var lookup = FS.lookupPath(path, { follow: !dontFollow });
    node = lookup.node;
  } else {
    node = path;
  }
  FS.doChmod(null, node, mode, dontFollow);
},
// chmod that does not follow a final symlink.
lchmod(path, mode) {
  FS.chmod(path, mode, true);
},
// chmod via an open file descriptor.
fchmod(fd, mode) {
  var stream = FS.getStreamChecked(fd);
  FS.doChmod(stream, stream.node, mode, false);
},
|
|
// Shared chown implementation. The uid/gid are not stored; only the
// timestamp is touched.
doChown(stream, node, dontFollow) {
  FS.doSetAttr(stream, node, {
    timestamp: Date.now(),
    dontFollow
    // we ignore the uid / gid for now
  });
},
// chown by path string, or directly by node object (internal callers).
chown(path, uid, gid, dontFollow) {
  var node;
  if (typeof path == 'string') {
    var lookup = FS.lookupPath(path, { follow: !dontFollow });
    node = lookup.node;
  } else {
    node = path;
  }
  FS.doChown(null, node, dontFollow);
},
// chown that does not follow a final symlink.
lchown(path, uid, gid) {
  FS.chown(path, uid, gid, true);
},
// chown via an open file descriptor.
fchown(fd, uid, gid) {
  var stream = FS.getStreamChecked(fd);
  FS.doChown(stream, stream.node, false);
},
|
|
// Shared truncate implementation: validate the node type and write
// permission, then set the new size through doSetAttr.
doTruncate(stream, node, len) {
  if (FS.isDir(node.mode)) {
    throw new FS.ErrnoError(31); // EISDIR
  }
  if (!FS.isFile(node.mode)) {
    throw new FS.ErrnoError(28); // EINVAL
  }
  var errCode = FS.nodePermissions(node, 'w');
  if (errCode) {
    throw new FS.ErrnoError(errCode);
  }
  FS.doSetAttr(stream, node, {
    size: len,
    timestamp: Date.now()
  });
},
// truncate by path string, or directly by node object.
truncate(path, len) {
  if (len < 0) {
    throw new FS.ErrnoError(28); // EINVAL
  }
  var node;
  if (typeof path == 'string') {
    var lookup = FS.lookupPath(path, { follow: true });
    node = lookup.node;
  } else {
    node = path;
  }
  FS.doTruncate(null, node, len);
},
// truncate via an open fd; the stream must be open for writing.
ftruncate(fd, len) {
  var stream = FS.getStreamChecked(fd);
  if (len < 0 || (stream.flags & 2097155) === 0) {
    // negative length, or fd opened read-only (28 = EINVAL)
    throw new FS.ErrnoError(28);
  }
  FS.doTruncate(stream, stream.node, len);
},
|
|
// Update the access/modification times of the file at `path`
// (follows symlinks).
utime(path, atime, mtime) {
  var lookup = FS.lookupPath(path, { follow: true });
  var node = lookup.node;
  // 63 = EPERM when the backend provides no setattr op
  var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
  setattr(node, {
    atime: atime,
    mtime: mtime
  });
},
|
|
// Open (and possibly create) a file, returning a new stream object.
// `flags` may be a numeric musl-style flag word or a string like "w+".
// Flag constants used below: 64=O_CREAT, 128=O_EXCL, 512=O_TRUNC,
// 1024=O_APPEND, 65536=O_DIRECTORY, 131072=O_NOFOLLOW.
open(path, flags, mode = 0o666) {
  if (path === "") {
    throw new FS.ErrnoError(44); // ENOENT
  }
  flags = typeof flags == 'string' ? FS_modeStringToFlags(flags) : flags;
  if ((flags & 64)) {
    // creating: keep permission bits, mark as regular file (32768 = S_IFREG)
    mode = (mode & 4095) | 32768;
  } else {
    // mode is only meaningful with O_CREAT
    mode = 0;
  }
  var node;
  var isDirPath;
  if (typeof path == 'object') {
    // internal callers may pass a node directly
    node = path;
  } else {
    isDirPath = path.endsWith("/");
    // noent_okay makes it so that if the final component of the path
    // doesn't exist, lookupPath returns `node: undefined`. `path` will be
    // updated to point to the target of all symlinks.
    var lookup = FS.lookupPath(path, {
      follow: !(flags & 131072),
      noent_okay: true
    });
    node = lookup.node;
    path = lookup.path;
  }
  // perhaps we need to create the node
  var created = false;
  if ((flags & 64)) {
    if (node) {
      // if O_CREAT and O_EXCL are set, error out if the node already exists
      if ((flags & 128)) {
        throw new FS.ErrnoError(20); // EEXIST
      }
    } else if (isDirPath) {
      // cannot O_CREAT a path ending in '/' (31 = EISDIR)
      throw new FS.ErrnoError(31);
    } else {
      // node doesn't exist, try to create it
      // Ignore the permission bits here to ensure we can `open` this new
      // file below. We use chmod below to apply the permissions once the
      // file is open.
      node = FS.mknod(path, mode | 0o777, 0);
      created = true;
    }
  }
  if (!node) {
    throw new FS.ErrnoError(44); // ENOENT
  }
  // can't truncate a device
  if (FS.isChrdev(node.mode)) {
    flags &= ~512;
  }
  // if asked only for a directory, then this must be one
  if ((flags & 65536) && !FS.isDir(node.mode)) {
    throw new FS.ErrnoError(54); // ENOTDIR
  }
  // check permissions, if this is not a file we just created now (it is ok to
  // create and write to a file with read-only permissions; it is read-only
  // for later use)
  if (!created) {
    var errCode = FS.mayOpen(node, flags);
    if (errCode) {
      throw new FS.ErrnoError(errCode);
    }
  }
  // do truncation if necessary
  if ((flags & 512) && !created) {
    FS.truncate(node, 0);
  }
  // we've already handled these, don't pass down to the underlying vfs
  flags &= ~(128 | 512 | 131072);

  // register the stream with the filesystem
  var stream = FS.createStream({
    node,
    path: FS.getPath(node), // we want the absolute path to the node
    flags,
    seekable: true,
    position: 0,
    stream_ops: node.stream_ops,
    // used by the file family libc calls (fopen, fwrite, ferror, etc.)
    ungotten: [],
    error: false
  });
  // call the new stream's open function
  if (stream.stream_ops.open) {
    stream.stream_ops.open(stream);
  }
  if (created) {
    // apply the permissions the caller actually asked for
    FS.chmod(node, mode & 0o777);
  }
  if (Module['logReadFiles'] && !(flags & 1)) {
    // track files opened for reading when logReadFiles is enabled
    if (!(path in FS.readFiles)) {
      FS.readFiles[path] = 1;
    }
  }
  return stream;
},
|
|
close(stream) {
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (stream.getdents) stream.getdents = null; // free readdir state
|
|
try {
|
|
if (stream.stream_ops.close) {
|
|
stream.stream_ops.close(stream);
|
|
}
|
|
} catch (e) {
|
|
throw e;
|
|
} finally {
|
|
FS.closeStream(stream.fd);
|
|
}
|
|
stream.fd = null;
|
|
},
|
|
isClosed(stream) {
|
|
return stream.fd === null;
|
|
},
|
|
// Reposition a stream (lseek). whence: 0=SEEK_SET, 1=SEEK_CUR, 2=SEEK_END.
// Returns the new absolute position.
llseek(stream, offset, whence) {
  if (FS.isClosed(stream)) {
    throw new FS.ErrnoError(8); // EBADF
  }
  if (!stream.seekable || !stream.stream_ops.llseek) {
    throw new FS.ErrnoError(70); // ESPIPE
  }
  if (whence != 0 && whence != 1 && whence != 2) {
    throw new FS.ErrnoError(28); // EINVAL
  }
  stream.position = stream.stream_ops.llseek(stream, offset, whence);
  stream.ungotten = []; // seeking invalidates any ungetc state
  return stream.position;
},
|
|
// Read up to `length` bytes into buffer[offset..]. An explicit
// `position` performs a positional read (pread-style) that does not
// advance the stream offset. Returns the number of bytes read.
read(stream, buffer, offset, length, position) {
  assert(offset >= 0);
  if (length < 0 || position < 0) {
    throw new FS.ErrnoError(28); // EINVAL
  }
  if (FS.isClosed(stream)) {
    throw new FS.ErrnoError(8); // EBADF
  }
  if ((stream.flags & 2097155) === 1) {
    // stream was opened write-only (8 = EBADF)
    throw new FS.ErrnoError(8);
  }
  if (FS.isDir(stream.node.mode)) {
    throw new FS.ErrnoError(31); // EISDIR
  }
  if (!stream.stream_ops.read) {
    throw new FS.ErrnoError(28); // EINVAL
  }
  var seeking = typeof position != 'undefined';
  if (!seeking) {
    position = stream.position;
  } else if (!stream.seekable) {
    // positional reads need a seekable stream (70 = ESPIPE)
    throw new FS.ErrnoError(70);
  }
  var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
  if (!seeking) stream.position += bytesRead;
  return bytesRead;
},
|
|
// Write `length` bytes from buffer[offset..]. An explicit `position`
// performs a positional write (pwrite-style) that does not advance the
// stream offset. `canOwn` lets the backend take ownership of the buffer.
// Returns the number of bytes written.
write(stream, buffer, offset, length, position, canOwn) {
  assert(offset >= 0);
  if (length < 0 || position < 0) {
    throw new FS.ErrnoError(28); // EINVAL
  }
  if (FS.isClosed(stream)) {
    throw new FS.ErrnoError(8); // EBADF
  }
  if ((stream.flags & 2097155) === 0) {
    // stream was opened read-only (8 = EBADF)
    throw new FS.ErrnoError(8);
  }
  if (FS.isDir(stream.node.mode)) {
    throw new FS.ErrnoError(31); // EISDIR
  }
  if (!stream.stream_ops.write) {
    throw new FS.ErrnoError(28); // EINVAL
  }
  if (stream.seekable && stream.flags & 1024) {
    // seek to the end before writing in append mode
    FS.llseek(stream, 0, 2);
  }
  var seeking = typeof position != 'undefined';
  if (!seeking) {
    position = stream.position;
  } else if (!stream.seekable) {
    // positional writes need a seekable stream (70 = ESPIPE)
    throw new FS.ErrnoError(70);
  }
  var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
  if (!seeking) stream.position += bytesWritten;
  return bytesWritten;
},
|
|
// Memory-map a region of the stream: validate access modes, then
// delegate to the backend's mmap op.
mmap(stream, length, position, prot, flags) {
  // User requests writing to file (prot & PROT_WRITE != 0).
  // Checking if we have permissions to write to the file unless
  // MAP_PRIVATE flag is set. According to POSIX spec it is possible
  // to write to file opened in read-only mode with MAP_PRIVATE flag,
  // as all modifications will be visible only in the memory of
  // the current process.
  if ((prot & 2) !== 0
      && (flags & 2) === 0
      && (stream.flags & 2097155) !== 2) {
    throw new FS.ErrnoError(2); // EACCES
  }
  if ((stream.flags & 2097155) === 1) {
    // write-only streams cannot be mapped (2 = EACCES)
    throw new FS.ErrnoError(2);
  }
  if (!stream.stream_ops.mmap) {
    throw new FS.ErrnoError(43); // ENODEV
  }
  if (!length) {
    // zero-length mappings are invalid (28 = EINVAL)
    throw new FS.ErrnoError(28);
  }
  return stream.stream_ops.mmap(stream, length, position, prot, flags);
},
|
|
// Flush an mmap'd region back through the backend; a backend without an
// msync op makes this a successful no-op.
msync(stream, buffer, offset, length, mmapFlags) {
  assert(offset >= 0);
  if (!stream.stream_ops.msync) {
    return 0;
  }
  return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags);
},
// Forward an ioctl to the backend (59 = ENOTTY if unsupported).
ioctl(stream, cmd, arg) {
  if (!stream.stream_ops.ioctl) {
    throw new FS.ErrnoError(59);
  }
  return stream.stream_ops.ioctl(stream, cmd, arg);
},
|
|
readFile(path, opts = {}) {
|
|
opts.flags = opts.flags || 0;
|
|
opts.encoding = opts.encoding || 'binary';
|
|
if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') {
|
|
abort(`Invalid encoding type "${opts.encoding}"`);
|
|
}
|
|
var stream = FS.open(path, opts.flags);
|
|
var stat = FS.stat(path);
|
|
var length = stat.size;
|
|
var buf = new Uint8Array(length);
|
|
FS.read(stream, buf, 0, length, 0);
|
|
if (opts.encoding === 'utf8') {
|
|
buf = UTF8ArrayToString(buf);
|
|
}
|
|
FS.close(stream);
|
|
return buf;
|
|
},
|
|
writeFile(path, data, opts = {}) {
|
|
opts.flags = opts.flags || 577;
|
|
var stream = FS.open(path, opts.flags, opts.mode);
|
|
if (typeof data == 'string') {
|
|
data = new Uint8Array(intArrayFromString(data, true));
|
|
}
|
|
if (ArrayBuffer.isView(data)) {
|
|
FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn);
|
|
} else {
|
|
abort('Unsupported data type');
|
|
}
|
|
FS.close(stream);
|
|
},
|
|
// Current working directory, as an absolute path string.
cwd:() => FS.currentPath,
// Change the working directory, verifying the target exists, is a
// directory, and is searchable ('x' permission).
chdir(path) {
  var lookup = FS.lookupPath(path, { follow: true });
  if (lookup.node === null) {
    throw new FS.ErrnoError(44); // ENOENT
  }
  if (!FS.isDir(lookup.node.mode)) {
    throw new FS.ErrnoError(54); // ENOTDIR
  }
  var errCode = FS.nodePermissions(lookup.node, 'x');
  if (errCode) {
    throw new FS.ErrnoError(errCode);
  }
  FS.currentPath = lookup.path;
},
|
|
// Create the conventional default directory layout.
createDefaultDirectories() {
  FS.mkdir('/tmp');
  FS.mkdir('/home');
  FS.mkdir('/home/web_user');
},
// Populate /dev with the standard device nodes: /dev/null, the ttys,
// the random sources, and /dev/shm.
createDefaultDevices() {
  // create /dev
  FS.mkdir('/dev');
  // setup /dev/null
  FS.registerDevice(FS.makedev(1, 3), {
    read: () => 0, // always EOF
    write: (stream, buffer, offset, length, pos) => length, // discard everything
    llseek: () => 0,
  });
  FS.mkdev('/dev/null', FS.makedev(1, 3));
  // setup /dev/tty and /dev/tty1
  // stderr needs to print output using err() rather than out()
  // so we register a second tty just for it.
  TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
  TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
  FS.mkdev('/dev/tty', FS.makedev(5, 0));
  FS.mkdev('/dev/tty1', FS.makedev(6, 0));
  // setup /dev/[u]random
  // use a buffer to avoid overhead of individual crypto calls per byte
  var randomBuffer = new Uint8Array(1024), randomLeft = 0;
  var randomByte = () => {
    if (randomLeft === 0) {
      // refill the pool in one bulk call
      randomFill(randomBuffer);
      randomLeft = randomBuffer.byteLength;
    }
    return randomBuffer[--randomLeft];
  };
  FS.createDevice('/dev', 'random', randomByte);
  FS.createDevice('/dev', 'urandom', randomByte);
  // we're not going to emulate the actual shm device,
  // just create the tmp dirs that reside in it commonly
  FS.mkdir('/dev/shm');
  FS.mkdir('/dev/shm/tmp');
},
|
|
// Build /proc/self/fd as a tiny virtual filesystem whose entries mirror
// the currently open file descriptors; readlink of /proc/self/fd/N
// yields the path of fd N.
createSpecialDirectories() {
  // create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the
  // name of the stream for fd 6 (see test_unistd_ttyname)
  FS.mkdir('/proc');
  var proc_self = FS.mkdir('/proc/self');
  FS.mkdir('/proc/self/fd');
  FS.mount({
    mount() {
      // 16895 = 0o40777: a world-accessible directory node
      var node = FS.createNode(proc_self, 'fd', 16895, 73);
      node.stream_ops = {
        llseek: MEMFS.stream_ops.llseek,
      };
      node.node_ops = {
        // looking up "N" fabricates a throwaway node whose readlink
        // reports the path of the stream with fd N
        lookup(parent, name) {
          var fd = +name;
          var stream = FS.getStreamChecked(fd);
          var ret = {
            parent: null,
            mount: { mountpoint: 'fake' },
            node_ops: { readlink: () => stream.path },
            id: fd + 1,
          };
          ret.parent = ret; // make it look like a simple root node
          return ret;
        },
        // listing the directory yields one entry per open fd
        readdir() {
          return Array.from(FS.streams.entries())
            .filter(([k, v]) => v)
            .map(([k, v]) => k.toString());
        }
      };
      return node;
    }
  }, {}, '/proc/self/fd');
},
|
|
// Wire up fds 0/1/2. When explicit input/output/error callbacks are
// supplied, dedicated devices are created for them; otherwise the
// standard streams are symlinked to the default tty devices.
createStandardStreams(input, output, error) {
  // TODO deprecate the old functionality of a single
  // input / output callback and that utilizes FS.createDevice
  // and instead require a unique set of stream ops

  // by default, we symlink the standard streams to the
  // default tty devices. however, if the standard streams
  // have been overwritten we create a unique device for
  // them instead.
  if (input) {
    FS.createDevice('/dev', 'stdin', input);
  } else {
    FS.symlink('/dev/tty', '/dev/stdin');
  }
  if (output) {
    FS.createDevice('/dev', 'stdout', null, output);
  } else {
    FS.symlink('/dev/tty', '/dev/stdout');
  }
  if (error) {
    FS.createDevice('/dev', 'stderr', null, error);
  } else {
    FS.symlink('/dev/tty1', '/dev/stderr');
  }

  // open default streams for the stdin, stdout and stderr devices
  var stdin = FS.open('/dev/stdin', 0); // 0 = O_RDONLY
  var stdout = FS.open('/dev/stdout', 1); // 1 = O_WRONLY
  var stderr = FS.open('/dev/stderr', 1); // 1 = O_WRONLY
  assert(stdin.fd === 0, `invalid handle for stdin (${stdin.fd})`);
  assert(stdout.fd === 1, `invalid handle for stdout (${stdout.fd})`);
  assert(stderr.fd === 2, `invalid handle for stderr (${stderr.fd})`);
},
|
|
// One-time setup run at module startup: allocate the name-lookup table,
// mount MEMFS at the root, and create the default directory/device tree.
staticInit() {
  FS.nameTable = new Array(4096);

  FS.mount(MEMFS, {}, '/');

  FS.createDefaultDirectories();
  FS.createDefaultDevices();
  FS.createSpecialDirectories();

  FS.filesystems = {
    'MEMFS': MEMFS,
  };
},
// Runtime initialization: hook up stdin/stdout/stderr. Must only run
// once per module instance.
init(input, output, error) {
  assert(!FS.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)');
  FS.initialized = true;

  // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
  input ??= Module['stdin'];
  output ??= Module['stdout'];
  error ??= Module['stderr'];

  FS.createStandardStreams(input, output, error);
},
|
|
// Shutdown: flush libc stdio buffers and close every open stream.
quit() {
  FS.initialized = false;
  // force-flush all streams, so we get musl std streams printed out
  _fflush(0);
  // close all of our streams
  for (var stream of FS.streams) {
    if (stream) {
      FS.close(stream);
    }
  }
},
|
|
findObject(path, dontResolveLastLink) {
|
|
var ret = FS.analyzePath(path, dontResolveLastLink);
|
|
if (!ret.exists) {
|
|
return null;
|
|
}
|
|
return ret.object;
|
|
},
|
|
// Gather everything known about `path` (and its parent) into a single
// result object without throwing; lookup failures land in `ret.error`.
analyzePath(path, dontResolveLastLink) {
  // operate from within the context of the symlink's target
  try {
    var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
    path = lookup.path;
  } catch (e) {
    // ignore: fall through with the original path
  }
  var ret = {
    isRoot: false, exists: false, error: 0, name: null, path: null, object: null,
    parentExists: false, parentPath: null, parentObject: null
  };
  try {
    var lookup = FS.lookupPath(path, { parent: true });
    ret.parentExists = true;
    ret.parentPath = lookup.path;
    ret.parentObject = lookup.node;
    ret.name = PATH.basename(path);
    lookup = FS.lookupPath(path, { follow: !dontResolveLastLink });
    ret.exists = true;
    ret.path = lookup.path;
    ret.object = lookup.node;
    ret.name = lookup.node.name;
    ret.isRoot = lookup.path === '/';
  } catch (e) {
    ret.error = e.errno;
  };
  return ret;
},
|
|
// Create each directory segment of `path` under `parent`, returning the
// final directory path. Existing directories are tolerated. canRead and
// canWrite are accepted for API compatibility but not used here.
createPath(parent, path, canRead, canWrite) {
  parent = typeof parent == 'string' ? parent : FS.getPath(parent);
  var parts = path.split('/').reverse();
  while (parts.length) {
    var part = parts.pop();
    if (!part) continue; // skip empty segments
    var current = PATH.join2(parent, part);
    try {
      FS.mkdir(current);
    } catch (e) {
      // 20 = EEXIST: fine, keep descending
      if (e.errno != 20) throw e;
    }
    parent = current;
  }
  // NOTE(review): relies on var-hoisting of `current`; returns undefined
  // if `path` contained no non-empty segments — confirm callers never
  // pass such a path.
  return current;
},
|
|
createFile(parent, name, properties, canRead, canWrite) {
|
|
var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name);
|
|
var mode = FS_getMode(canRead, canWrite);
|
|
return FS.create(path, mode);
|
|
},
|
|
// Create a file under `parent` pre-filled with `data` (a string, stored
// one byte per charCode, or an array-like of bytes). Permissions come
// from canRead/canWrite; canOwn lets the backend take ownership of the
// data buffer.
createDataFile(parent, name, data, canRead, canWrite, canOwn) {
  var path = name;
  if (parent) {
    parent = typeof parent == 'string' ? parent : FS.getPath(parent);
    path = name ? PATH.join2(parent, name) : parent;
  }
  var mode = FS_getMode(canRead, canWrite);
  var node = FS.create(path, mode);
  if (data) {
    if (typeof data == 'string') {
      // NOTE(review): charCodeAt stores one byte per UTF-16 code unit;
      // this is not UTF-8 encoding — confirm callers only pass
      // Latin-1-range strings here.
      var arr = new Array(data.length);
      for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
      data = arr;
    }
    // make sure we can write to the file
    FS.chmod(node, mode | 146);
    // 577 = O_WRONLY|O_CREAT|O_TRUNC
    var stream = FS.open(node, 577);
    FS.write(stream, data, 0, data.length, 0, canOwn);
    FS.close(stream);
    // restore the originally requested permissions
    FS.chmod(node, mode);
  }
},
|
|
// Legacy helper: synthesize a character device at parent/name whose
// reads pull bytes from `input()` and whose writes push single bytes to
// `output(byte)`.
createDevice(parent, name, input, output) {
  var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name);
  var mode = FS_getMode(!!input, !!output);
  // lazily allocate major numbers for these fake devices, starting at 64
  FS.createDevice.major ??= 64;
  var dev = FS.makedev(FS.createDevice.major++, 0);
  // Create a fake device that a set of stream ops to emulate
  // the old behavior.
  FS.registerDevice(dev, {
    open(stream) {
      stream.seekable = false;
    },
    close(stream) {
      // flush any pending line data
      if (output?.buffer?.length) {
        output(10); // '\n'
      }
    },
    read(stream, buffer, offset, length, pos /* ignored */) {
      var bytesRead = 0;
      for (var i = 0; i < length; i++) {
        var result;
        try {
          result = input();
        } catch (e) {
          throw new FS.ErrnoError(29); // EIO
        }
        if (result === undefined && bytesRead === 0) {
          // no data available at all yet (6 = EAGAIN)
          throw new FS.ErrnoError(6);
        }
        // null/undefined signals EOF
        if (result === null || result === undefined) break;
        bytesRead++;
        buffer[offset+i] = result;
      }
      if (bytesRead) {
        stream.node.atime = Date.now();
      }
      return bytesRead;
    },
    write(stream, buffer, offset, length, pos) {
      for (var i = 0; i < length; i++) {
        try {
          output(buffer[offset+i]);
        } catch (e) {
          throw new FS.ErrnoError(29); // EIO
        }
      }
      if (length) {
        stream.node.mtime = stream.node.ctime = Date.now();
      }
      return i;
    }
  });
  return FS.mkdev(path, mode, dev);
},
|
|
// Ensure a lazy file's contents are resident. In browser/worker
// environments the contents should already have been fetched by
// createLazyFile; on the command line the URL is read as a local file.
forceLoadFile(obj) {
  if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
  if (globalThis.XMLHttpRequest) {
    abort("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
  } else { // Command-line.
    try {
      obj.contents = readBinary(obj.url);
    } catch (e) {
      throw new FS.ErrnoError(29); // EIO
    }
  }
},
|
|
createLazyFile(parent, name, url, canRead, canWrite) {
|
|
// Lazy chunked Uint8Array (implements get and length from Uint8Array).
|
|
// Actual getting is abstracted away for eventual reuse.
|
|
class LazyUint8Array {
|
|
lengthKnown = false;
|
|
chunks = []; // Loaded chunks. Index is the chunk number
|
|
get(idx) {
|
|
if (idx > this.length-1 || idx < 0) {
|
|
return undefined;
|
|
}
|
|
var chunkOffset = idx % this.chunkSize;
|
|
var chunkNum = (idx / this.chunkSize)|0;
|
|
return this.getter(chunkNum)[chunkOffset];
|
|
}
|
|
setDataGetter(getter) {
|
|
this.getter = getter;
|
|
}
|
|
cacheLength() {
|
|
// Find length
|
|
var xhr = new XMLHttpRequest();
|
|
xhr.open('HEAD', url, false);
|
|
xhr.send(null);
|
|
if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) abort("Couldn't load " + url + ". Status: " + xhr.status);
|
|
var datalength = Number(xhr.getResponseHeader("Content-length"));
|
|
var header;
|
|
var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
|
|
var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip";
|
|
|
|
var chunkSize = 1024*1024; // Chunk size in bytes
|
|
|
|
if (!hasByteServing) chunkSize = datalength;
|
|
|
|
// Function to get a range from the remote URL.
|
|
var doXHR = (from, to) => {
|
|
if (from > to) abort("invalid range (" + from + ", " + to + ") or no bytes requested!");
|
|
if (to > datalength-1) abort("only " + datalength + " bytes available! programmer error!");
|
|
|
|
// TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
|
|
var xhr = new XMLHttpRequest();
|
|
xhr.open('GET', url, false);
|
|
if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
|
|
|
|
// Some hints to the browser that we want binary data.
|
|
xhr.responseType = 'arraybuffer';
|
|
if (xhr.overrideMimeType) {
|
|
xhr.overrideMimeType('text/plain; charset=x-user-defined');
|
|
}
|
|
|
|
xhr.send(null);
|
|
if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) abort("Couldn't load " + url + ". Status: " + xhr.status);
|
|
if (xhr.response !== undefined) {
|
|
return new Uint8Array(/** @type{Array<number>} */(xhr.response || []));
|
|
}
|
|
return intArrayFromString(xhr.responseText || '', true);
|
|
};
|
|
var lazyArray = this;
|
|
lazyArray.setDataGetter((chunkNum) => {
|
|
var start = chunkNum * chunkSize;
|
|
var end = (chunkNum+1) * chunkSize - 1; // including this byte
|
|
end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block
|
|
if (typeof lazyArray.chunks[chunkNum] == 'undefined') {
|
|
lazyArray.chunks[chunkNum] = doXHR(start, end);
|
|
}
|
|
if (typeof lazyArray.chunks[chunkNum] == 'undefined') abort('doXHR failed!');
|
|
return lazyArray.chunks[chunkNum];
|
|
});
|
|
|
|
if (usesGzip || !datalength) {
|
|
// if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length
|
|
chunkSize = datalength = 1; // this will force getter(0)/doXHR do download the whole file
|
|
datalength = this.getter(0).length;
|
|
chunkSize = datalength;
|
|
out("LazyFiles on gzip forces download of the whole file when length is accessed");
|
|
}
|
|
|
|
this._length = datalength;
|
|
this._chunkSize = chunkSize;
|
|
this.lengthKnown = true;
|
|
}
|
|
get length() {
|
|
if (!this.lengthKnown) {
|
|
this.cacheLength();
|
|
}
|
|
return this._length;
|
|
}
|
|
get chunkSize() {
|
|
if (!this.lengthKnown) {
|
|
this.cacheLength();
|
|
}
|
|
return this._chunkSize;
|
|
}
|
|
}
|
|
|
|
if (globalThis.XMLHttpRequest) {
|
|
if (!ENVIRONMENT_IS_WORKER) abort('Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc');
|
|
var lazyArray = new LazyUint8Array();
|
|
var properties = { isDevice: false, contents: lazyArray };
|
|
} else {
|
|
var properties = { isDevice: false, url: url };
|
|
}
|
|
|
|
var node = FS.createFile(parent, name, properties, canRead, canWrite);
|
|
// This is a total hack, but I want to get this lazy file code out of the
|
|
// core of MEMFS. If we want to keep this lazy file concept I feel it should
|
|
// be its own thin LAZYFS proxying calls to MEMFS.
|
|
if (properties.contents) {
|
|
node.contents = properties.contents;
|
|
} else if (properties.url) {
|
|
node.contents = null;
|
|
node.url = properties.url;
|
|
}
|
|
// Add a function that defers querying the file size until it is asked the first time.
|
|
Object.defineProperties(node, {
|
|
usedBytes: {
|
|
get: function() { return this.contents.length; }
|
|
}
|
|
});
|
|
// override each stream op with one that tries to force load the lazy file first
|
|
var stream_ops = {};
|
|
for (const [key, fn] of Object.entries(node.stream_ops)) {
|
|
stream_ops[key] = (...args) => {
|
|
FS.forceLoadFile(node);
|
|
return fn(...args);
|
|
};
|
|
}
|
|
function writeChunks(stream, buffer, offset, length, position) {
  // Copy up to `length` bytes of the node's contents, starting at
  // `position`, into `buffer` at `offset`. Returns the number of bytes
  // actually copied (0 when reading at/past the end).
  var contents = stream.node.contents;
  if (position >= contents.length)
    return 0;
  var size = Math.min(contents.length - position, length);
  assert(size >= 0);
  // Plain arrays / typed arrays are indexed directly; a LazyUint8Array
  // (from the sync binary XHR path) exposes a .get() accessor instead.
  var readByte = contents.slice
    ? (idx) => contents[idx]
    : (idx) => contents.get(idx);
  for (var i = 0; i < size; i++) {
    buffer[offset + i] = readByte(position + i);
  }
  return size;
}
|
|
// use a custom read function
|
|
stream_ops.read = (stream, buffer, offset, length, position) => {
|
|
FS.forceLoadFile(node);
|
|
return writeChunks(stream, buffer, offset, length, position)
|
|
};
|
|
// use a custom mmap function
|
|
stream_ops.mmap = (stream, length, position, prot, flags) => {
|
|
FS.forceLoadFile(node);
|
|
var ptr = mmapAlloc(length);
|
|
if (!ptr) {
|
|
throw new FS.ErrnoError(48);
|
|
}
|
|
writeChunks(stream, HEAP8, ptr, length, position);
|
|
return { ptr, allocated: true };
|
|
};
|
|
node.stream_ops = stream_ops;
|
|
return node;
|
|
},
|
|
// Deprecated FS entry points. These APIs were removed from Emscripten's FS;
// each stub aborts with a message naming the replacement API so stale
// callers fail loudly instead of silently misbehaving.
absolutePath() {
  abort('FS.absolutePath has been removed; use PATH_FS.resolve instead');
},
createFolder() {
  abort('FS.createFolder has been removed; use FS.mkdir instead');
},
createLink() {
  abort('FS.createLink has been removed; use FS.symlink instead');
},
joinPath() {
  abort('FS.joinPath has been removed; use PATH.join instead');
},
mmapAlloc() {
  abort('FS.mmapAlloc has been replaced by the top level function mmapAlloc');
},
standardizePath() {
  abort('FS.standardizePath has been removed; use PATH.normalize instead');
},
|
|
};
|
|
|
|
// Shared helpers for the __syscall_* wrappers below.
// Fix: removed a stray empty statement (`;;`) after the ErrnoError throw in
// calculateAt; no behavior change.
var SYSCALLS = {
  // Resolve `path` the way the *at() syscalls do: absolute paths are
  // returned as-is; relative paths are resolved against the directory fd
  // (dirfd === -100 is AT_FDCWD, i.e. the current working directory).
  // An empty path is only accepted when `allowEmpty` is set.
  calculateAt(dirfd, path, allowEmpty) {
    if (PATH.isAbs(path)) {
      return path;
    }
    // relative path
    var dir;
    if (dirfd === -100) {
      dir = FS.cwd();
    } else {
      var dirstream = SYSCALLS.getStreamFromFD(dirfd);
      dir = dirstream.path;
    }
    if (path.length == 0) {
      if (!allowEmpty) {
        throw new FS.ErrnoError(44); // 44 = ENOENT
      }
      return dir;
    }
    return dir + '/' + path;
  },
  // Serialize a JS stat object into a native `struct stat` at `buf`.
  // The byte offsets follow the layout this build was compiled against;
  // the checkInt* calls are debug-build range assertions.
  writeStat(buf, stat) {
    HEAPU32[((buf)>>2)] = stat.dev;checkInt32(stat.dev);                      // st_dev
    HEAPU32[(((buf)+(4))>>2)] = stat.mode;checkInt32(stat.mode);              // st_mode
    HEAPU32[(((buf)+(8))>>2)] = stat.nlink;checkInt32(stat.nlink);            // st_nlink
    HEAPU32[(((buf)+(12))>>2)] = stat.uid;checkInt32(stat.uid);               // st_uid
    HEAPU32[(((buf)+(16))>>2)] = stat.gid;checkInt32(stat.gid);               // st_gid
    HEAPU32[(((buf)+(20))>>2)] = stat.rdev;checkInt32(stat.rdev);             // st_rdev
    HEAP64[(((buf)+(24))>>3)] = BigInt(stat.size);checkInt64(stat.size);      // st_size (i64)
    HEAP32[(((buf)+(32))>>2)] = 4096;checkInt32(4096);                        // st_blksize: fixed 4KB
    HEAP32[(((buf)+(36))>>2)] = stat.blocks;checkInt32(stat.blocks);          // st_blocks
    // Timestamps are split into i64 seconds + i32 nanoseconds.
    var atime = stat.atime.getTime();
    var mtime = stat.mtime.getTime();
    var ctime = stat.ctime.getTime();
    HEAP64[(((buf)+(40))>>3)] = BigInt(Math.floor(atime / 1000));checkInt64(Math.floor(atime / 1000));
    HEAPU32[(((buf)+(48))>>2)] = (atime % 1000) * 1000 * 1000;checkInt32((atime % 1000) * 1000 * 1000);
    HEAP64[(((buf)+(56))>>3)] = BigInt(Math.floor(mtime / 1000));checkInt64(Math.floor(mtime / 1000));
    HEAPU32[(((buf)+(64))>>2)] = (mtime % 1000) * 1000 * 1000;checkInt32((mtime % 1000) * 1000 * 1000);
    HEAP64[(((buf)+(72))>>3)] = BigInt(Math.floor(ctime / 1000));checkInt64(Math.floor(ctime / 1000));
    HEAPU32[(((buf)+(80))>>2)] = (ctime % 1000) * 1000 * 1000;checkInt32((ctime % 1000) * 1000 * 1000);
    HEAP64[(((buf)+(88))>>3)] = BigInt(stat.ino);checkInt64(stat.ino);        // st_ino (i64)
    return 0;
  },
  // Serialize filesystem statistics into a native `struct statfs` at `buf`.
  writeStatFs(buf, stats) {
    HEAPU32[(((buf)+(4))>>2)] = stats.bsize;checkInt32(stats.bsize);
    HEAPU32[(((buf)+(60))>>2)] = stats.bsize;checkInt32(stats.bsize);         // f_frsize mirrors f_bsize
    HEAP64[(((buf)+(8))>>3)] = BigInt(stats.blocks);checkInt64(stats.blocks);
    HEAP64[(((buf)+(16))>>3)] = BigInt(stats.bfree);checkInt64(stats.bfree);
    HEAP64[(((buf)+(24))>>3)] = BigInt(stats.bavail);checkInt64(stats.bavail);
    HEAP64[(((buf)+(32))>>3)] = BigInt(stats.files);checkInt64(stats.files);
    HEAP64[(((buf)+(40))>>3)] = BigInt(stats.ffree);checkInt64(stats.ffree);
    HEAPU32[(((buf)+(48))>>2)] = stats.fsid;checkInt32(stats.fsid);
    HEAPU32[(((buf)+(64))>>2)] = stats.flags;checkInt32(stats.flags); // ST_NOSUID
    HEAPU32[(((buf)+(56))>>2)] = stats.namelen;checkInt32(stats.namelen);
  },
  // Write a mapped region back to the underlying file (msync). MAP_PRIVATE
  // (flags & 2) mappings are never synced back.
  doMsync(addr, stream, len, flags, offset) {
    if (!FS.isFile(stream.node.mode)) {
      throw new FS.ErrnoError(43); // 43 = ENODEV
    }
    if (flags & 2) {
      // MAP_PRIVATE calls need not to be synced back to underlying fs
      return 0;
    }
    var buffer = HEAPU8.slice(addr, addr + len);
    FS.msync(stream, buffer, offset, len, flags);
  },
  // Look up an open stream by fd; throws EBADF via FS when fd is invalid.
  getStreamFromFD(fd) {
    var stream = FS.getStreamChecked(fd);
    return stream;
  },
  // Cursor into the current syscall's varargs area (see syscallGetVarargI).
  varargs:undefined,
  // Decode a NUL-terminated UTF-8 string from the wasm heap.
  getStr(ptr) {
    var ret = UTF8ToString(ptr);
    return ret;
  },
};
|
|
function ___syscall_chmod(path, mode) {
  // chmod(2): change the permission bits of the file named by `path`.
  // Returns 0 on success, or a negative errno on failure.
  try {
    FS.chmod(SYSCALLS.getStr(path), mode);
    return 0;
  } catch (e) {
    // Only translate FS errno errors; anything else is a real bug.
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return -e.errno;
  }
}
|
|
|
|
// faccessat(2): check accessibility of a file relative to `dirfd`.
// `amode` is a bitmask of R_OK(4) | W_OK(2) | X_OK(1), or F_OK(0) for a
// bare existence check. Returns 0, or a negative errno.
function ___syscall_faccessat(dirfd, path, amode, flags) {
  try {
    path = SYSCALLS.getStr(path);
    // Only AT_EACCESS (512) is tolerated (and ignored) here.
    assert(!flags || flags == 512);
    path = SYSCALLS.calculateAt(dirfd, path);
    if (amode & ~7) {
      // need a valid mode
      return -28; // EINVAL
    }
    var lookup = FS.lookupPath(path, { follow: true });
    var node = lookup.node;
    if (!node) {
      return -44; // ENOENT
    }
    // Translate the amode bits into the FS permission-string convention.
    var perms = '';
    if (amode & 4) perms += 'r';
    if (amode & 2) perms += 'w';
    if (amode & 1) perms += 'x';
    if (perms /* otherwise, they've just passed F_OK */ && FS.nodePermissions(node, perms)) {
      return -2; // EACCES
    }
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
|
|
|
|
function ___syscall_fchmod(fd, mode) {
  // fchmod(2): change permission bits via an open file descriptor.
  try {
    FS.fchmod(fd, mode);
    return 0;
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return -e.errno;
  }
}

function ___syscall_fchown32(fd, owner, group) {
  // fchown(2): change ownership via an open file descriptor.
  try {
    FS.fchown(fd, owner, group);
    return 0;
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return -e.errno;
  }
}
|
|
|
|
// Pop the next i32 argument from the current syscall's varargs area and
// advance the cursor. SYSCALLS.varargs must have been set by the wrapper.
var syscallGetVarargI = () => {
  assert(SYSCALLS.varargs != undefined);
  // the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number.
  var ret = HEAP32[((+SYSCALLS.varargs)>>2)];
  SYSCALLS.varargs += 4;
  return ret;
};

// Pointers are 32-bit in this build, so reading a pointer vararg is the
// exact same operation as reading an i32 vararg.
var syscallGetVarargP = syscallGetVarargI;
|
|
|
|
|
|
// fcntl(2). The numeric `cmd` values follow the 32-bit ABI this build
// targets: 0=F_DUPFD, 1=F_GETFD, 2=F_SETFD, 3=F_GETFL, 4=F_SETFL,
// 12=F_GETLK, 13=F_SETLK, 14=F_SETLKW (presumably musl's values — the
// constants are baked in by the code generator).
function ___syscall_fcntl64(fd, cmd, varargs) {
  SYSCALLS.varargs = varargs;
  try {
    var stream = SYSCALLS.getStreamFromFD(fd);
    switch (cmd) {
      case 0: {
        // F_DUPFD: duplicate to the lowest free fd >= arg.
        var arg = syscallGetVarargI();
        if (arg < 0) {
          return -28; // EINVAL
        }
        while (FS.streams[arg]) {
          arg++;
        }
        var newStream;
        newStream = FS.dupStream(stream, arg);
        return newStream.fd;
      }
      case 1:
      case 2:
        return 0; // FD_CLOEXEC makes no sense for a single process.
      case 3:
        // F_GETFL: report the stream's open flags.
        return stream.flags;
      case 4: {
        // F_SETFL: OR the requested flags into the stream.
        var arg = syscallGetVarargI();
        stream.flags |= arg;
        return 0;
      }
      case 12: {
        // F_GETLK: write l_type = F_UNLCK (2) into the caller's struct
        // flock — this runtime never holds file locks.
        var arg = syscallGetVarargP();
        var offset = 0;
        // We're always unlocked.
        HEAP16[(((arg)+(offset))>>1)] = 2;checkInt16(2);
        return 0;
      }
      case 13:
      case 14:
        // Pretend that the locking is successful. These are process-level locks,
        // and Emscripten programs are a single process. If we supported linking a
        // filesystem between programs, we'd need to do more here.
        // See https://github.com/emscripten-core/emscripten/issues/23697
        return 0;
    }
    // Unrecognized command.
    return -28; // EINVAL
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
|
|
|
|
function ___syscall_fstat64(fd, buf) {
  // fstat(2): write stat info for the open descriptor `fd` into `buf`.
  try {
    var st = FS.fstat(fd);
    return SYSCALLS.writeStat(buf, st);
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return -e.errno;
  }
}
|
|
|
|
// Bounds of the integer range a JS double can represent without loss.
var INT53_MAX = 9007199254740992;

var INT53_MIN = -9007199254740992;

// Convert a BigInt to a Number, yielding NaN when it falls outside the
// 53-bit-safe range above.
var bigintToI53Checked = (num) => {
  if (num < INT53_MIN || num > INT53_MAX) return NaN;
  return Number(num);
};
|
|
function ___syscall_ftruncate64(fd, length) {
  // ftruncate(2): resize the file behind `fd` to `length` bytes.
  length = bigintToI53Checked(length);
  try {
    // NaN means the 64-bit length did not fit in a double.
    if (isNaN(length)) return -61; // EOVERFLOW
    FS.ftruncate(fd, length);
    return 0;
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return -e.errno;
  }
}
|
|
|
|
|
|
// Encode `str` as NUL-terminated UTF-8 into the wasm heap at `outPtr`,
// writing at most `maxBytesToWrite` bytes. Delegates to stringToUTF8Array
// and returns its result (the number of bytes written).
var stringToUTF8 = (str, outPtr, maxBytesToWrite) => {
  assert(typeof maxBytesToWrite == 'number', 'stringToUTF8(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!');
  return stringToUTF8Array(str, HEAPU8, outPtr, maxBytesToWrite);
};
|
|
function ___syscall_getcwd(buf, size) {
  // getcwd(2): copy the current working directory into `buf`. Returns the
  // number of bytes written (including the NUL), -28 (EINVAL) for a
  // zero-sized buffer, or -68 (ERANGE) when the buffer is too small.
  try {
    if (size === 0) return -28;
    var cwd = FS.cwd();
    var neededBytes = lengthBytesUTF8(cwd) + 1;
    if (size < neededBytes) return -68;
    stringToUTF8(cwd, buf, size);
    return neededBytes;
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return -e.errno;
  }
}
|
|
|
|
// lstat(2): like stat, but does not follow a trailing symlink.
function ___syscall_lstat64(path, buf) {
  try {
    path = SYSCALLS.getStr(path);
    return SYSCALLS.writeStat(buf, FS.lstat(path));
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}

// mkdirat(2): create a directory at `path`, resolved relative to `dirfd`.
function ___syscall_mkdirat(dirfd, path, mode) {
  try {
    path = SYSCALLS.getStr(path);
    path = SYSCALLS.calculateAt(dirfd, path);
    FS.mkdir(path, mode, 0);
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
|
|
|
|
// fstatat(2): stat a path relative to `dirfd`. Recognized flag bits:
// 256 = AT_SYMLINK_NOFOLLOW, 4096 = AT_EMPTY_PATH; 2048 is also masked
// out below (tolerated but ignored). Any other flag bit trips the assert.
function ___syscall_newfstatat(dirfd, path, buf, flags) {
  try {
    path = SYSCALLS.getStr(path);
    var nofollow = flags & 256;
    var allowEmpty = flags & 4096;
    flags = flags & (~6400); // 6400 = 4096 | 2048 | 256
    assert(!flags, `unknown flags in __syscall_newfstatat: ${flags}`);
    path = SYSCALLS.calculateAt(dirfd, path, allowEmpty);
    return SYSCALLS.writeStat(buf, nofollow ? FS.lstat(path) : FS.stat(path));
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}

// openat(2): open `path` relative to `dirfd` and return the new fd.
// The optional `mode` vararg is only consulted when varargs is non-null
// (i.e. when O_CREAT-style calls pass a third argument).
function ___syscall_openat(dirfd, path, flags, varargs) {
  SYSCALLS.varargs = varargs;
  try {
    path = SYSCALLS.getStr(path);
    path = SYSCALLS.calculateAt(dirfd, path);
    var mode = varargs ? syscallGetVarargI() : 0;
    return FS.open(path, flags, mode).fd;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
|
|
|
|
|
|
|
|
// readlinkat(2): copy the target of a symlink into `buf` (at most
// `bufsize` bytes, NOT NUL-terminated). Returns the number of bytes
// written or a negative errno.
function ___syscall_readlinkat(dirfd, path, buf, bufsize) {
  try {
    path = SYSCALLS.getStr(path);
    path = SYSCALLS.calculateAt(dirfd, path);
    if (bufsize <= 0) return -28; // EINVAL
    var ret = FS.readlink(path);

    var len = Math.min(bufsize, lengthBytesUTF8(ret));
    // Save the byte that stringToUTF8's NUL terminator will clobber...
    var endChar = HEAP8[buf+len];
    stringToUTF8(ret, buf, bufsize+1);
    // readlink is one of the rare functions that write out a C string, but does never append a null to the output buffer(!)
    // stringToUTF8() always appends a null byte, so restore the character under the null byte after the write.
    HEAP8[buf+len] = endChar;
    return len;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
|
|
|
|
// rmdir(2): remove an (empty) directory.
function ___syscall_rmdir(path) {
  try {
    path = SYSCALLS.getStr(path);
    FS.rmdir(path);
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}

// stat(2): stat `path` (following symlinks) into the buffer at `buf`.
function ___syscall_stat64(path, buf) {
  try {
    path = SYSCALLS.getStr(path);
    return SYSCALLS.writeStat(buf, FS.stat(path));
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
|
|
|
|
function ___syscall_unlinkat(dirfd, path, flags) {
  // unlinkat(2): remove a file, or a directory when AT_REMOVEDIR (512)
  // is set. Any other flag value is rejected with EINVAL.
  try {
    var resolved = SYSCALLS.calculateAt(dirfd, SYSCALLS.getStr(path));
    if (!flags) {
      FS.unlink(resolved);
    } else if (flags === 512) { // AT_REMOVEDIR
      FS.rmdir(resolved);
    } else {
      return -28; // EINVAL
    }
    return 0;
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return -e.errno;
  }
}
|
|
|
|
// Read a 64-bit integer at `ptr` as a JS double: unsigned low word plus
// signed high word scaled by 2^32. Exact only while the value fits in 53
// bits.
var readI53FromI64 = (ptr) => {
  return HEAPU32[((ptr)>>2)] + HEAP32[(((ptr)+(4))>>2)] * 4294967296;
};
|
|
|
|
// utimensat(2): set access/modification times on a path relative to
// `dirfd`. `times` points to two timespec structs (atime, then mtime;
// 16 bytes apart); a null `times` means "set both to now". The special
// nanosecond values 1073741823 (UTIME_NOW) and 1073741822 (UTIME_OMIT)
// select "now" and "leave unchanged" respectively.
function ___syscall_utimensat(dirfd, path, times, flags) {
  try {
    path = SYSCALLS.getStr(path);
    assert(!flags); // no AT_* flags supported here
    path = SYSCALLS.calculateAt(dirfd, path, true);
    var now = Date.now(), atime, mtime;
    if (!times) {
      atime = now;
      mtime = now;
    } else {
      // First timespec: atime.
      var seconds = readI53FromI64(times);
      var nanoseconds = HEAP32[(((times)+(8))>>2)];
      if (nanoseconds == 1073741823) { // UTIME_NOW
        atime = now;
      } else if (nanoseconds == 1073741822) { // UTIME_OMIT
        atime = null;
      } else {
        atime = (seconds*1000) + (nanoseconds/(1000*1000)); // ms
      }
      // Second timespec: mtime.
      times += 16;
      seconds = readI53FromI64(times);
      nanoseconds = HEAP32[(((times)+(8))>>2)];
      if (nanoseconds == 1073741823) { // UTIME_NOW
        mtime = now;
      } else if (nanoseconds == 1073741822) { // UTIME_OMIT
        mtime = null;
      } else {
        mtime = (seconds*1000) + (nanoseconds/(1000*1000)); // ms
      }
    }
    // null here means UTIME_OMIT was passed. If both were set to UTIME_OMIT then
    // we can skip the call completely.
    if ((mtime ?? atime) !== null) {
      FS.utime(path, atime, mtime);
    }
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
}
|
|
|
|
// Called when compiled code invokes C abort(); forwards to the runtime's
// abort with a descriptive message.
var __abort_js = () =>
  abort('native code called abort()');
|
|
|
|
// Gregorian leap-year test: divisible by 4, excluding centuries that are
// not divisible by 400.
var isLeapYear = (year) => (year % 4 === 0 && year % 100 !== 0) || year % 400 === 0;

// Cumulative day counts at the start of each month (Jan..Dec).
var MONTH_DAYS_LEAP_CUMULATIVE = [0,31,60,91,121,152,182,213,244,274,305,335];

var MONTH_DAYS_REGULAR_CUMULATIVE = [0,31,59,90,120,151,181,212,243,273,304,334];

// Day-of-year for a JS Date, zero-based (Jan 1st => 0).
var ydayFromDate = (date) => {
  var monthTable = isLeapYear(date.getFullYear())
    ? MONTH_DAYS_LEAP_CUMULATIVE
    : MONTH_DAYS_REGULAR_CUMULATIVE;
  return monthTable[date.getMonth()] + date.getDate() - 1;
};
|
|
|
|
// localtime_r backend: expand a time_t (seconds, passed as i64) into the
// native `struct tm` at tmPtr. Field offsets: tm_sec 0, tm_min 4,
// tm_hour 8, tm_mday 12, tm_mon 16, tm_year 20 (years since 1900),
// tm_wday 24, tm_yday 28, tm_isdst 32, tm_gmtoff 36.
function __localtime_js(time, tmPtr) {
  time = bigintToI53Checked(time);

  var date = new Date(time*1000);
  HEAP32[((tmPtr)>>2)] = date.getSeconds();checkInt32(date.getSeconds());
  HEAP32[(((tmPtr)+(4))>>2)] = date.getMinutes();checkInt32(date.getMinutes());
  HEAP32[(((tmPtr)+(8))>>2)] = date.getHours();checkInt32(date.getHours());
  HEAP32[(((tmPtr)+(12))>>2)] = date.getDate();checkInt32(date.getDate());
  HEAP32[(((tmPtr)+(16))>>2)] = date.getMonth();checkInt32(date.getMonth());
  HEAP32[(((tmPtr)+(20))>>2)] = date.getFullYear()-1900;checkInt32(date.getFullYear()-1900);
  HEAP32[(((tmPtr)+(24))>>2)] = date.getDay();checkInt32(date.getDay());

  var yday = ydayFromDate(date)|0;
  HEAP32[(((tmPtr)+(28))>>2)] = yday;checkInt32(yday);
  // tm_gmtoff is seconds east of UTC; getTimezoneOffset() is minutes
  // west, hence the negation and *60.
  HEAP32[(((tmPtr)+(36))>>2)] = -(date.getTimezoneOffset() * 60);checkInt32(-(date.getTimezoneOffset() * 60));

  // Attention: DST is in December in South, and some regions don't have DST at all.
  var start = new Date(date.getFullYear(), 0, 1);
  var summerOffset = new Date(date.getFullYear(), 6, 1).getTimezoneOffset();
  var winterOffset = start.getTimezoneOffset();
  // DST is in effect when the zone has two offsets and this date uses the
  // smaller (more eastward) one.
  var dst = (summerOffset != winterOffset && date.getTimezoneOffset() == Math.min(winterOffset, summerOffset))|0;
  HEAP32[(((tmPtr)+(32))>>2)] = dst;checkInt32(dst);
  ;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// mmap backend: map `len` bytes of the file behind `fd` at `offset`.
// Writes whether a fresh allocation was made to *allocated and the mapped
// pointer to *addr. Returns 0 or a negative errno.
function __mmap_js(len, prot, flags, fd, offset, allocated, addr) {
  offset = bigintToI53Checked(offset);

  try {
    // musl's mmap doesn't allow values over a certain limit
    // see OFF_MASK in mmap.c.
    assert(!isNaN(offset));
    var stream = SYSCALLS.getStreamFromFD(fd);
    var res = FS.mmap(stream, len, offset, prot, flags);
    var ptr = res.ptr;
    HEAP32[((allocated)>>2)] = res.allocated;checkInt32(res.allocated);
    HEAPU32[((addr)>>2)] = ptr;
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return -e.errno;
  }
  ;
}
|
|
|
|
|
|
function __munmap_js(addr, len, prot, flags, fd, offset) {
  // munmap backend: before the mapping is discarded, flush writable
  // (PROT_WRITE, bit 2) mappings back to the underlying file.
  offset = bigintToI53Checked(offset);
  try {
    var stream = SYSCALLS.getStreamFromFD(fd);
    var isWritable = (prot & 2) !== 0;
    if (isWritable) {
      SYSCALLS.doMsync(addr, stream, len, flags, offset);
    }
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return -e.errno;
  }
}
|
|
|
|
|
|
// tzset backend: populate the C globals `timezone` and `daylight` and the
// two tzname strings (std/dst) from the host's current timezone settings.
// std_name/dst_name are 17-byte buffers (TZNAME_MAX + NUL).
var __tzset_js = (timezone, daylight, std_name, dst_name) => {
  // TODO: Use (malleable) environment variables instead of system settings.
  var currentYear = new Date().getFullYear();
  var winter = new Date(currentYear, 0, 1);
  var summer = new Date(currentYear, 6, 1);
  var winterOffset = winter.getTimezoneOffset();
  var summerOffset = summer.getTimezoneOffset();

  // Local standard timezone offset. Local standard time is not adjusted for
  // daylight savings. This code uses the fact that getTimezoneOffset returns
  // a greater value during Standard Time versus Daylight Saving Time (DST).
  // Thus it determines the expected output during Standard Time, and it
  // compares whether the output of the given date the same (Standard) or less
  // (DST).
  var stdTimezoneOffset = Math.max(winterOffset, summerOffset);

  // timezone is specified as seconds west of UTC ("The external variable
  // `timezone` shall be set to the difference, in seconds, between
  // Coordinated Universal Time (UTC) and local standard time."), the same
  // as returned by stdTimezoneOffset.
  // See http://pubs.opengroup.org/onlinepubs/009695399/functions/tzset.html
  HEAPU32[((timezone)>>2)] = stdTimezoneOffset * 60;

  // daylight is 1 iff the zone observes DST at all.
  HEAP32[((daylight)>>2)] = Number(winterOffset != summerOffset);checkInt32(Number(winterOffset != summerOffset));

  // Format a getTimezoneOffset() value as a "UTC±HHMM" zone name.
  var extractZone = (timezoneOffset) => {
    // Why inverse sign?
    // Read here https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/getTimezoneOffset
    var sign = timezoneOffset >= 0 ? "-" : "+";

    var absOffset = Math.abs(timezoneOffset)
    var hours = String(Math.floor(absOffset / 60)).padStart(2, "0");
    var minutes = String(absOffset % 60).padStart(2, "0");

    return `UTC${sign}${hours}${minutes}`;
  }

  var winterName = extractZone(winterOffset);
  var summerName = extractZone(summerOffset);
  assert(winterName);
  assert(summerName);
  assert(lengthBytesUTF8(winterName) <= 16, `timezone name truncated to fit in TZNAME_MAX (${winterName})`);
  assert(lengthBytesUTF8(summerName) <= 16, `timezone name truncated to fit in TZNAME_MAX (${summerName})`);
  // The smaller offset is the DST one; which season that falls in depends
  // on the hemisphere.
  if (summerOffset < winterOffset) {
    // Northern hemisphere
    stringToUTF8(winterName, std_name, 17);
    stringToUTF8(summerName, dst_name, 17);
  } else {
    stringToUTF8(winterName, dst_name, 17);
    stringToUTF8(summerName, std_name, 17);
  }
};
|
|
|
|
// High-resolution monotonic timestamp, in milliseconds.
var _emscripten_get_now = () => performance.now();

// Wall-clock time in milliseconds since the Unix epoch.
var _emscripten_date_now = () => Date.now();

// performance.now() is monotonic, so the monotonic WASI clocks work here.
var nowIsMonotonic = 1;

// WASI defines clock ids 0..3; anything outside that range is invalid.
var checkWasiClock = (clock_id) => 0 <= clock_id && clock_id <= 3;
|
|
|
|
// WASI clock_time_get: write the current time of clock `clk_id` (ns, as
// i64) to *ptime. Returns 0, 28 (EINVAL) for an unknown clock, or 52
// (ENOSYS) when a monotonic clock is requested but unavailable.
function _clock_time_get(clk_id, ignored_precision, ptime) {
  ignored_precision = bigintToI53Checked(ignored_precision);

  if (!checkWasiClock(clk_id)) {
    return 28;
  }
  var now;
  // all wasi clocks but realtime are monotonic
  if (clk_id === 0) {
    now = _emscripten_date_now();
  } else if (nowIsMonotonic) {
    now = _emscripten_get_now();
  } else {
    return 52;
  }
  // "now" is in ms, and wasi times are in ns.
  var nsec = Math.round(now * 1000 * 1000);
  HEAP64[((ptime)>>3)] = BigInt(nsec);checkInt64(nsec);
  return 0;
  ;
}
|
|
|
|
|
|
// Hard upper bound for heap growth in this build (2048 MB). Staying below
// the 32-bit wrap point avoids heap sizes that read back as 0 on the wasm
// side, which would need special-casing everywhere sizes are handled.
var getHeapMax = () => 2147483648;

// Exported alias used by compiled code.
var _emscripten_get_heap_max = () => getHeapMax();
|
|
|
|
|
|
|
|
|
|
|
|
var growMemory = (size) => {
  // Try to grow wasm memory to at least `size` bytes. Returns 1 on
  // success; falls through to an implicit undefined (falsy) on failure so
  // the caller can retry with a smaller request.
  var oldHeapSize = wasmMemory.buffer.byteLength;
  // Round the request up to whole 64KB wasm pages; .grow() takes a page
  // delta relative to the current size.
  var pageDelta = ((size - oldHeapSize + 65535) / 65536) | 0;
  try {
    wasmMemory.grow(pageDelta);
    updateMemoryViews();
    return 1 /*success*/;
  } catch(e) {
    err(`growMemory: Attempted to grow heap from ${oldHeapSize} bytes to ${size} bytes, but got error: ${e}`);
  }
  // implicit 0 return to save code size (caller will cast "undefined" into 0
  // anyhow)
};
|
|
// sbrk/malloc backend: grow the wasm heap to satisfy `requestedSize`
// bytes. Returns true on success, false when the limit is hit or every
// growth attempt fails. NOTE: the final err() below reads `newSize` after
// the loop — this relies on `var` hoisting, so keep the declaration a
// `var`.
var _emscripten_resize_heap = (requestedSize) => {
  var oldSize = HEAPU8.length;
  // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned.
  requestedSize >>>= 0;
  // With multithreaded builds, races can happen (another thread might increase the size
  // in between), so return a failure, and let the caller retry.
  assert(requestedSize > oldSize);

  // Memory resize rules:
  // 1. Always increase heap size to at least the requested size, rounded up
  //    to next page multiple.
  // 2a. If MEMORY_GROWTH_LINEAR_STEP == -1, excessively resize the heap
  //     geometrically: increase the heap size according to
  //     MEMORY_GROWTH_GEOMETRIC_STEP factor (default +20%), At most
  //     overreserve by MEMORY_GROWTH_GEOMETRIC_CAP bytes (default 96MB).
  // 2b. If MEMORY_GROWTH_LINEAR_STEP != -1, excessively resize the heap
  //     linearly: increase the heap size by at least
  //     MEMORY_GROWTH_LINEAR_STEP bytes.
  // 3. Max size for the heap is capped at 2048MB-WASM_PAGE_SIZE, or by
  //    MAXIMUM_MEMORY, or by ASAN limit, depending on which is smallest
  // 4. If we were unable to allocate as much memory, it may be due to
  //    over-eager decision to excessively reserve due to (3) above.
  //    Hence if an allocation fails, cut down on the amount of excess
  //    growth, in an attempt to succeed to perform a smaller allocation.

  // A limit is set for how much we can grow. We should not exceed that
  // (the wasm binary specifies it, so if we tried, we'd fail anyhow).
  var maxHeapSize = getHeapMax();
  if (requestedSize > maxHeapSize) {
    err(`Cannot enlarge memory, requested ${requestedSize} bytes, but the limit is ${maxHeapSize} bytes!`);
    return false;
  }

  // Loop through potential heap size increases. If we attempt a too eager
  // reservation that fails, cut down on the attempted size and reserve a
  // smaller bump instead. (max 3 times, chosen somewhat arbitrarily)
  for (var cutDown = 1; cutDown <= 4; cutDown *= 2) {
    var overGrownHeapSize = oldSize * (1 + 0.2 / cutDown); // ensure geometric growth
    // but limit overreserving (default to capping at +96MB overgrowth at most)
    overGrownHeapSize = Math.min(overGrownHeapSize, requestedSize + 100663296 );

    var newSize = Math.min(maxHeapSize, alignMemory(Math.max(requestedSize, overGrownHeapSize), 65536));

    // Timing instrumentation for the (potentially expensive) grow call.
    var t0 = _emscripten_get_now();
    var replacement = growMemory(newSize);
    var t1 = _emscripten_get_now();
    dbg(`Heap resize call from ${oldSize} to ${newSize} took ${(t1 - t0)} msecs. Success: ${!!replacement}`);
    if (replacement) {

      return true;
    }
  }
  // `newSize` here is the last (smallest) size attempted in the loop.
  err(`Failed to grow the heap from ${oldSize} bytes to ${newSize} bytes, not enough memory!`);
  return false;
};
|
|
|
|
// User-overridable environment variables; entries here override (or, when
// set to undefined, unset) the defaults built in getEnvStrings().
var ENV = {
};

// argv[0] substitute for the emulated process.
var getExecutableName = () => thisProgram || './this.program';
|
|
var getEnvStrings = () => {
  // Build (once) and memoize the list of "KEY=VALUE" strings exposed to
  // the program through the wasi environ_* calls.
  if (!getEnvStrings.strings) {
    // Browser language detection #8751
    var lang = (globalThis.navigator?.language ?? 'C').replace('-', '_') + '.UTF-8';
    // Default values.
    var env = {
      'USER': 'web_user',
      'LOGNAME': 'web_user',
      'PATH': '/',
      'PWD': '/',
      'HOME': '/home/web_user',
      'LANG': lang,
      '_': getExecutableName()
    };
    // Apply the user-provided values, if any. A key explicitly set to
    // undefined in ENV unsets the corresponding default.
    for (var x in ENV) {
      if (ENV[x] === undefined) delete env[x];
      else env[x] = ENV[x];
    }
    var strings = [];
    for (var key in env) {
      strings.push(`${key}=${env[key]}`);
    }
    getEnvStrings.strings = strings;
  }
  return getEnvStrings.strings;
};
|
|
|
|
// WASI environ_get: write the environment strings into environ_buf and
// the array of pointers to them into __environ. Always returns 0.
var _environ_get = (__environ, environ_buf) => {
  var bufSize = 0;
  var envp = 0;
  for (var string of getEnvStrings()) {
    var ptr = environ_buf + bufSize;
    // Store the pointer to this string, then the NUL-terminated bytes.
    HEAPU32[(((__environ)+(envp))>>2)] = ptr;
    bufSize += stringToUTF8(string, ptr, Infinity) + 1;
    envp += 4;
  }
  return 0;
};

// WASI environ_sizes_get: report the number of environment strings and
// the total buffer size (incl. NUL terminators) environ_get will need.
var _environ_sizes_get = (penviron_count, penviron_buf_size) => {
  var strings = getEnvStrings();
  HEAPU32[((penviron_count)>>2)] = strings.length;checkInt32(strings.length);
  var bufSize = 0;
  for (var string of strings) {
    bufSize += lengthBytesUTF8(string) + 1;
  }
  HEAPU32[((penviron_buf_size)>>2)] = bufSize;checkInt32(bufSize);
  return 0;
};
|
|
|
|
function _fd_close(fd) {
  // WASI fd_close: close the stream behind `fd`. Returns 0 or a
  // (positive) errno — WASI errnos are positive, unlike the syscalls.
  try {
    FS.close(SYSCALLS.getStreamFromFD(fd));
    return 0;
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return e.errno;
  }
}
|
|
|
|
// WASI fd_fdstat_get: fill the fdstat struct at pbuf. The filetype codes
// are WASI's: 2 = character_device, 3 = directory, 4 = regular_file,
// 7 = symbolic_link. Flags and rights are always reported as 0 here.
function _fd_fdstat_get(fd, pbuf) {
  try {
    var rightsBase = 0;
    var rightsInheriting = 0;
    var flags = 0;
    {
      var stream = SYSCALLS.getStreamFromFD(fd);
      // All character devices are terminals (other things a Linux system would
      // assume is a character device, like the mouse, we have special APIs for).
      var type = stream.tty ? 2 :
                 FS.isDir(stream.mode) ? 3 :
                 FS.isLink(stream.mode) ? 7 :
                 4;
    }
    // Struct layout: u8 fs_filetype @0, u16 fs_flags @2,
    // u64 fs_rights_base @8, u64 fs_rights_inheriting @16.
    HEAP8[pbuf] = type;checkInt8(type);
    HEAP16[(((pbuf)+(2))>>1)] = flags;checkInt16(flags);
    HEAP64[(((pbuf)+(8))>>3)] = BigInt(rightsBase);checkInt64(rightsBase);
    HEAP64[(((pbuf)+(16))>>3)] = BigInt(rightsInheriting);checkInt64(rightsInheriting);
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
}
|
|
|
|
/** @param {number=} offset */
|
|
// Scatter-read from `stream` into the iovec array at `iov` (iovcnt
// entries of {ptr:u32, len:u32}). When `offset` is given, reads are
// positioned (pread-style) and the offset advances per chunk. Returns
// total bytes read, or -1 on error.
var doReadv = (stream, iov, iovcnt, offset) => {
  var ret = 0;
  for (var i = 0; i < iovcnt; i++) {
    var ptr = HEAPU32[((iov)>>2)];
    var len = HEAPU32[(((iov)+(4))>>2)];
    iov += 8; // advance to the next iovec entry
    var curr = FS.read(stream, HEAP8, ptr, len, offset);
    if (curr < 0) return -1;
    ret += curr;
    if (curr < len) break; // nothing more to read
    if (typeof offset != 'undefined') {
      offset += curr;
    }
  }
  return ret;
};
|
|
|
|
function _fd_read(fd, iov, iovcnt, pnum) {
  // WASI fd_read: scatter-read into the iovec list and store the byte
  // count at *pnum. Returns 0 or a positive errno.
  try {
    var stream = SYSCALLS.getStreamFromFD(fd);
    var bytesRead = doReadv(stream, iov, iovcnt);
    HEAPU32[((pnum)>>2)] = bytesRead;checkInt32(bytesRead);
    return 0;
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return e.errno;
  }
}
|
|
|
|
|
|
// WASI fd_seek: reposition `fd` by `offset` (i64) per `whence`, storing
// the resulting absolute position (i64) at *newOffset. Returns 0 or a
// positive errno (61 = EOVERFLOW when the offset exceeds 53 bits).
function _fd_seek(fd, offset, whence, newOffset) {
  offset = bigintToI53Checked(offset);

  try {
    if (isNaN(offset)) return 61;
    var stream = SYSCALLS.getStreamFromFD(fd);
    FS.llseek(stream, offset, whence);
    HEAP64[((newOffset)>>3)] = BigInt(stream.position);checkInt64(stream.position);
    // Seeking back to the start (SEEK_SET, 0) rewinds directory iteration.
    if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null; // reset readdir state
    return 0;
  } catch (e) {
    if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e;
    return e.errno;
  }
  ;
}
|
|
|
|
function _fd_sync(fd) {
  // WASI fd_sync: flush the stream via its fsync op, when the backing
  // filesystem provides one (otherwise the optional chain yields
  // undefined, which the wasm caller treats as success).
  try {
    var stream = SYSCALLS.getStreamFromFD(fd);
    return stream.stream_ops?.fsync?.(stream);
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return e.errno;
  }
}
|
|
|
|
/** @param {number=} offset */
|
|
// Gather-write to `stream` from the iovec array at `iov` (iovcnt entries
// of {ptr:u32, len:u32}). When `offset` is given, writes are positioned
// (pwrite-style). Returns total bytes written, or -1 on error.
var doWritev = (stream, iov, iovcnt, offset) => {
  var ret = 0;
  for (var i = 0; i < iovcnt; i++) {
    var ptr = HEAPU32[((iov)>>2)];
    var len = HEAPU32[(((iov)+(4))>>2)];
    iov += 8; // advance to the next iovec entry
    var curr = FS.write(stream, HEAP8, ptr, len, offset);
    if (curr < 0) return -1;
    ret += curr;
    if (curr < len) {
      // No more space to write.
      break;
    }
    if (typeof offset != 'undefined') {
      offset += curr;
    }
  }
  return ret;
};
|
|
|
|
function _fd_write(fd, iov, iovcnt, pnum) {
  // WASI fd_write: gather-write the iovec list and store the byte count
  // at *pnum. Returns 0 or a positive errno.
  try {
    var stream = SYSCALLS.getStreamFromFD(fd);
    var bytesWritten = doWritev(stream, iov, iovcnt);
    HEAPU32[((pnum)>>2)] = bytesWritten;checkInt32(bytesWritten);
    return 0;
  } catch (e) {
    if (typeof FS === 'undefined' || e.name !== 'ErrnoError') throw e;
    return e.errno;
  }
}
|
|
|
|
var getCFunc = (ident) => {
  // Compiled C functions are exported on Module under a "_" prefix.
  var exportedName = '_' + ident;
  var func = Module[exportedName]; // closure exported function
  assert(func, 'Cannot call unknown function ' + ident + ', make sure it is exported');
  return func;
};
|
|
|
|
// Copy a JS array (or typed array) of byte values into linear memory
// starting at address `buffer`.
var writeArrayToMemory = (array, buffer) => {
  assert(array.length >= 0, 'writeArrayToMemory array must have a length (should be an array or typed array)');
  HEAP8.set(array, buffer);
};
|
|
|
|
|
|
|
|
var stackAlloc = (sz) => __emscripten_stack_alloc(sz);
|
|
// Encode `str` as NUL-terminated UTF-8 into freshly stack-allocated memory
// and return the pointer (only valid until the stack frame is restored).
var stringToUTF8OnStack = (str) => {
  var byteCount = lengthBytesUTF8(str) + 1; // +1 for the NUL terminator
  var ptr = stackAlloc(byteCount);
  stringToUTF8(str, ptr, byteCount);
  return ptr;
};
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Call a compiled C function from JavaScript, marshalling arguments and
 * the return value between the two worlds.
 * @param {string} ident exported C function name (without the leading '_')
 * @param {string|null=} returnType 'number' | 'string' | 'boolean', or
 *        null/undefined for void ('array' is not a legal return type)
 * @param {Array=} argTypes per-argument types ('number'|'string'|'array')
 * @param {Array=} args argument values
 * @param {Object=} opts
 * @returns the converted return value
 */
var ccall = (ident, returnType, argTypes, args, opts) => {
  // Marshallers that copy JS values into temporary stack storage and hand
  // back a pointer the wasm side can consume.
  var toC = {
    'string': (str) => {
      var ret = 0;
      if (str !== null && str !== undefined && str !== 0) { // null string
        ret = stringToUTF8OnStack(str);
      }
      return ret;
    },
    'array': (arr) => {
      var ret = stackAlloc(arr.length);
      writeArrayToMemory(arr, ret);
      return ret;
    }
  };

  // Translate the raw wasm result into the requested JS representation.
  function convertReturnValue(value) {
    if (returnType === 'string') {
      return UTF8ToString(value);
    }
    if (returnType === 'boolean') return Boolean(value);
    return value;
  }

  var nativeFunc = getCFunc(ident);
  var marshalled = [];
  var savedStack = 0;
  assert(returnType !== 'array', 'Return type should not be "array".');
  if (args) {
    for (var i = 0; i < args.length; i++) {
      var convert = toC[argTypes[i]];
      if (convert) {
        // Save the stack lazily, only once temporary allocations begin.
        if (savedStack === 0) savedStack = stackSave();
        marshalled[i] = convert(args[i]);
      } else {
        marshalled[i] = args[i];
      }
    }
  }
  var rawResult = nativeFunc(...marshalled);
  // Release any temporary stack allocations before converting the result.
  if (savedStack !== 0) stackRestore(savedStack);
  return convertReturnValue(rawResult);
};
|
|
|
|
/**
 * Produce a reusable JS wrapper around a compiled C function; each
 * invocation forwards to ccall with the captured type information.
 * @param {string=} returnType
 * @param {Array=} argTypes
 * @param {Object=} opts
 */
var cwrap = (ident, returnType, argTypes, opts) =>
  (...callArgs) => ccall(ident, returnType, argTypes, callArgs, opts);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Heap-allocate (via malloc) a NUL-terminated UTF-8 copy of `str`.
// Returns the pointer, or 0 if the allocation failed; the caller owns the
// memory and must free it.
var stringToNewUTF8 = (str) => {
  var byteCount = lengthBytesUTF8(str) + 1;
  var ptr = _malloc(byteCount);
  if (ptr) stringToUTF8(str, ptr, byteCount);
  return ptr;
};
|
|
|
|
|
|
// Maps a JS function -> its index in the wasm indirect function table.
// Created lazily by getFunctionAddress on first use.
var functionsInTableMap;

// Table indexes freed by removeFunction, recycled by getEmptyTableSlot
// before the table is grown.
var freeTableIndexes = [];

// JS-side cache of wasm table entries (table.get is comparatively slow);
// kept in sync by getWasmTableEntry/setWasmTableEntry.
var wasmTableMirror = [];
|
|
|
|
|
|
// Fetch the function stored at `funcPtr` in the wasm indirect function
// table, caching lookups in wasmTableMirror.
var getWasmTableEntry = (funcPtr) => {
  var cached = wasmTableMirror[funcPtr];
  if (!cached) {
    /** @suppress {checkTypes} */
    cached = wasmTableMirror[funcPtr] = wasmTable.get(funcPtr);
  }
  // Debug check: the mirror must agree with the live table.
  /** @suppress {checkTypes} */
  assert(wasmTable.get(funcPtr) == cached, 'JavaScript-side Wasm function table mirror is out of date!');
  return cached;
};
|
|
|
|
|
|
// Install `func` at slot `idx` of the wasm table and refresh the JS-side
// mirror entry for that slot.
var setWasmTableEntry = (idx, func) => {
  /** @suppress {checkTypes} */
  wasmTable.set(idx, func);
  // With ABORT_ON_WASM_EXCEPTIONS wasmTable.get is overridden to return
  // wrapped functions, so re-read through get() to cache the potential
  // wrapper rather than storing `func` directly into wasmTableMirror.
  /** @suppress {checkTypes} */
  wasmTableMirror[idx] = wasmTable.get(idx);
};
|
|
|
|
// Free the wasm-table slot `idx`: forget the JS function that lived there,
// null out the slot, and recycle the index for future addFunction calls.
var removeFunction = (idx) => {
  // Look up the current occupant before clearing the slot.
  var occupant = getWasmTableEntry(idx);
  functionsInTableMap.delete(occupant);
  setWasmTableEntry(idx, null);
  freeTableIndexes.push(idx);
};
|
|
|
|
|
|
// Record every non-null table entry in [offset, offset + count) into
// functionsInTableMap. A no-op until the map has been created.
var updateTableMap = (offset, count) => {
  if (!functionsInTableMap) return;
  var end = offset + count;
  for (var slot = offset; slot < end; slot++) {
    var entry = getWasmTableEntry(slot);
    // Skip holes (null/undefined slots).
    if (entry) {
      functionsInTableMap.set(entry, slot);
    }
  }
};
|
|
|
|
|
|
// Return the wasm-table index of `func`, or 0 when it is not in the table.
// Builds the function -> index map from the live table on first use.
var getFunctionAddress = (func) => {
  if (!functionsInTableMap) {
    functionsInTableMap = new WeakMap();
    updateTableMap(0, wasmTable.length);
  }
  return functionsInTableMap.get(func) || 0;
};
|
|
|
|
|
|
|
|
// Obtain a free index in the wasm function table: prefer recycled slots
// and grow the table by one only when none are available.
var getEmptyTableSlot = () => {
  if (freeTableIndexes.length) {
    return freeTableIndexes.pop();
  }
  try {
    // grow(1) returns the previous length, i.e. the new slot's index.
    return wasmTable['grow'](1);
  } catch (err) {
    if (!(err instanceof RangeError)) {
      throw err;
    }
    // RangeError means the table was linked without room to grow.
    abort('Unable to grow wasm table. Set ALLOW_TABLE_GROWTH.');
  }
};
|
|
|
|
|
|
// Prefix `arr` with its length encoded as a two-byte LEB128 value.
// The two-byte form is redundant for lengths < 128, but this is only a
// temporary representation, so compactness does not matter.
var uleb128EncodeWithLen = (arr) => {
  const count = arr.length;
  assert(count < 16384); // two LEB bytes cover at most 14 bits
  return [(count % 128) | 128, count >> 7, ...arr];
};
|
|
|
|
|
|
// Map from signature characters (as used by addFunction signatures) to the
// wasm binary-format value-type codes they denote.
var wasmTypeCodes = {
  'i': 0x7f, // i32
  'p': 0x7f, // i32 (pointer shares the i32 representation in wasm32)
  'j': 0x7e, // i64
  'f': 0x7d, // f32
  'd': 0x7c, // f64
  'e': 0x6f, // externref
};
|
|
// Translate a signature string (e.g. "vii" minus the return slot) into a
// length-prefixed vector of wasm value-type codes; aborts on an unknown
// signature character.
var generateTypePack = (types) => {
  var codes = Array.from(types, (type) => {
    var code = wasmTypeCodes[type];
    assert(code, `invalid signature char: ${type}`);
    return code;
  });
  return uleb128EncodeWithLen(codes);
};
|
|
// Wrap an arbitrary JS function so it becomes a genuine wasm function of
// signature `sig`, by synthesizing a one-function wasm module that imports
// the JS function and immediately re-exports it with the requested type.
var convertJsFunctionToWasm = (func, sig) => {
  // Only the type section varies with `sig`; the rest of the module bytes
  // are fixed.
  var moduleBytes = Uint8Array.of(
    0x00, 0x61, 0x73, 0x6d, // magic ("\0asm")
    0x01, 0x00, 0x00, 0x00, // version: 1
    0x01, // Type section code
    ...uleb128EncodeWithLen([
      0x01, // count: 1
      0x60 /* form: func */,
      // param types
      ...generateTypePack(sig.slice(1)),
      // return types (for now only supporting [] if `void` and single [T] otherwise)
      ...generateTypePack(sig[0] === 'v' ? '' : sig[0])
    ]),
    0x02, 0x07, // import section
    // (import "e" "f" (func 0 (type 0)))
    0x01, 0x01, 0x65, 0x01, 0x66, 0x00, 0x00,
    0x07, 0x05, // export section
    // (export "f" (func 0 (type 0)))
    0x01, 0x01, 0x66, 0x00, 0x00,
  );

  // The module is tiny, so synchronous compilation is acceptable. It
  // routes the import at "e.f" straight back out through the export "f".
  var synthModule = new WebAssembly.Module(moduleBytes);
  var synthInstance = new WebAssembly.Instance(synthModule, { 'e': { 'f': func } });
  return synthInstance.exports['f'];
};
|
|
|
|
|
|
|
|
/**
 * Add a JS function to the wasm indirect function table so compiled code
 * can invoke it through a function pointer. Returns the table index;
 * repeated calls with the same function return the same index.
 * @param {string=} sig wasm signature string (e.g. 'vii'); required when
 *        `func` is a plain JS function rather than an exported wasm one.
 */
var addFunction = (func, sig) => {
  assert(typeof func != 'undefined');
  // Reuse the existing slot when this function was added before, so each
  // function occupies exactly one unique index.
  var existing = getFunctionAddress(func);
  if (existing) {
    return existing;
  }

  // Not tracked yet — sanity-check that the tracking map has not drifted
  // out of sync with the actual table contents.
  for (var i = 0; i < wasmTable.length; i++) {
    assert(getWasmTableEntry(i) != func, 'function in Table but not functionsInTableMap');
  }

  var slot = getEmptyTableSlot();

  try {
    // table.set() rejects plain JS functions with a TypeError; only real
    // wasm functions can be stored directly.
    setWasmTableEntry(slot, func);
  } catch (err) {
    if (!(err instanceof TypeError)) {
      throw err;
    }
    // Fall back to wrapping the JS function in a synthesized wasm module,
    // which requires knowing its signature.
    assert(typeof sig != 'undefined', 'Missing signature argument to addFunction: ' + func);
    var wrapped = convertJsFunctionToWasm(func, sig);
    setWasmTableEntry(slot, wrapped);
  }

  functionsInTableMap.set(func, slot);

  return slot;
};
|
|
|
|
// Attach the preload helpers to the FS object and build the initial
// in-memory filesystem (root mount, /dev devices, standard streams).
FS.createPreloadedFile = FS_createPreloadedFile;
FS.preloadFile = FS_preloadFile;
FS.staticInit();;
|
|
// End JS library code
|
|
|
|
// include: postlibrary.js
|
|
// This file is included after the automatically-generated JS library code
|
|
// but before the wasm module is created.
|
|
|
|
// Apply user-supplied Module configuration before the runtime starts, and
// reject configuration options that older Emscripten versions accepted.
{

  // Begin ATMODULES hooks
  if (Module['noExitRuntime']) noExitRuntime = Module['noExitRuntime'];
  if (Module['preloadPlugins']) preloadPlugins = Module['preloadPlugins'];
  if (Module['print']) out = Module['print'];
  if (Module['printErr']) err = Module['printErr'];
  if (Module['wasmBinary']) wasmBinary = Module['wasmBinary'];
  // End ATMODULES hooks

  checkIncomingModuleAPI();

  if (Module['arguments']) arguments_ = Module['arguments'];
  if (Module['thisProgram']) thisProgram = Module['thisProgram'];

  // Assertions on removed incoming Module JS APIs.
  assert(typeof Module['memoryInitializerPrefixURL'] == 'undefined', 'Module.memoryInitializerPrefixURL option was removed, use Module.locateFile instead');
  assert(typeof Module['pthreadMainPrefixURL'] == 'undefined', 'Module.pthreadMainPrefixURL option was removed, use Module.locateFile instead');
  assert(typeof Module['cdInitializerPrefixURL'] == 'undefined', 'Module.cdInitializerPrefixURL option was removed, use Module.locateFile instead');
  assert(typeof Module['filePackagePrefixURL'] == 'undefined', 'Module.filePackagePrefixURL option was removed, use Module.locateFile instead');
  assert(typeof Module['read'] == 'undefined', 'Module.read option was removed');
  assert(typeof Module['readAsync'] == 'undefined', 'Module.readAsync option was removed (modify readAsync in JS)');
  assert(typeof Module['readBinary'] == 'undefined', 'Module.readBinary option was removed (modify readBinary in JS)');
  assert(typeof Module['setWindowTitle'] == 'undefined', 'Module.setWindowTitle option was removed (modify emscripten_set_window_title in JS)');
  assert(typeof Module['TOTAL_MEMORY'] == 'undefined', 'Module.TOTAL_MEMORY has been renamed Module.INITIAL_MEMORY');
  assert(typeof Module['ENVIRONMENT'] == 'undefined', 'Module.ENVIRONMENT has been deprecated. To force the environment, use the ENVIRONMENT compile-time option (for example, -sENVIRONMENT=web or -sENVIRONMENT=node)');
  assert(typeof Module['STACK_SIZE'] == 'undefined', 'STACK_SIZE can no longer be set at runtime. Use -sSTACK_SIZE at link time')
  // If memory is defined in wasm, the user can't provide it, or set INITIAL_MEMORY
  assert(typeof Module['wasmMemory'] == 'undefined', 'Use of `wasmMemory` detected. Use -sIMPORTED_MEMORY to define wasmMemory externally');
  assert(typeof Module['INITIAL_MEMORY'] == 'undefined', 'Detected runtime INITIAL_MEMORY setting. Use -sIMPORTED_MEMORY to define wasmMemory dynamically');

  // Run user preInit callbacks now, before any runtime initialization.
  if (Module['preInit']) {
    if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']];
    while (Module['preInit'].length > 0) {
      Module['preInit'].shift()();
    }
  }
  consumedModuleProp('preInit');
}
|
|
|
|
// Begin runtime exports
// Re-export the runtime helpers that sql.js' JS wrapper relies on.
Module['stackSave'] = stackSave;
Module['stackRestore'] = stackRestore;
Module['stackAlloc'] = stackAlloc;
Module['cwrap'] = cwrap;
Module['addFunction'] = addFunction;
Module['removeFunction'] = removeFunction;
Module['UTF8ToString'] = UTF8ToString;
Module['stringToNewUTF8'] = stringToNewUTF8;
Module['writeArrayToMemory'] = writeArrayToMemory;
|
|
// Emscripten library symbols that were not linked into this build.
// missingLibrarySymbol() (invoked just below this list) installs a getter
// for each one that explains how to link it in, instead of leaving a bare
// ReferenceError at the point of use.
var missingLibrarySymbols = [
  // i53/i64 conversion helpers
  'writeI53ToI64', 'writeI53ToI64Clamped', 'writeI53ToI64Signaling',
  'writeI53ToU64Clamped', 'writeI53ToU64Signaling', 'readI53FromU64',
  'convertI32PairToI53', 'convertI32PairToI53Checked', 'convertU32PairToI53',
  // runtime / call plumbing
  'getTempRet0', 'setTempRet0', 'createNamedFunction', 'exitJS',
  'withStackSave',
  // networking
  'inetPton4', 'inetNtop4', 'inetPton6', 'inetNtop6',
  'readSockaddr', 'writeSockaddr',
  'readEmAsmArgs', 'jstoi_q', 'autoResumeAudioContext',
  'getDynCaller', 'dynCall', 'handleException', 'keepRuntimeAlive',
  'runtimeKeepalivePush', 'runtimeKeepalivePop', 'callUserCallback',
  'maybeExit', 'asmjsMangle', 'HandleAllocator',
  'addOnInit', 'addOnPostCtor', 'addOnPreMain', 'addOnExit',
  'STACK_SIZE', 'STACK_ALIGN', 'POINTER_SIZE', 'ASSERTIONS',
  // string conversion
  'intArrayToString', 'AsciiToString', 'stringToAscii',
  'UTF16ToString', 'stringToUTF16', 'lengthBytesUTF16',
  'UTF32ToString', 'stringToUTF32', 'lengthBytesUTF32',
  // DOM / browser event plumbing
  'registerKeyEventCallback', 'maybeCStringToJsString', 'findEventTarget',
  'getBoundingClientRect', 'fillMouseEventData', 'registerMouseEventCallback',
  'registerWheelEventCallback', 'registerUiEventCallback',
  'registerFocusEventCallback', 'fillDeviceOrientationEventData',
  'registerDeviceOrientationEventCallback', 'fillDeviceMotionEventData',
  'registerDeviceMotionEventCallback', 'screenOrientation',
  'fillOrientationChangeEventData', 'registerOrientationChangeEventCallback',
  'fillFullscreenChangeEventData', 'registerFullscreenChangeEventCallback',
  'JSEvents_requestFullscreen', 'JSEvents_resizeCanvasForFullscreen',
  'registerRestoreOldStyle', 'hideEverythingExceptGivenElement',
  'restoreHiddenElements', 'setLetterbox',
  'softFullscreenResizeWebGLRenderTarget', 'doRequestFullscreen',
  'fillPointerlockChangeEventData', 'registerPointerlockChangeEventCallback',
  'registerPointerlockErrorEventCallback', 'requestPointerLock',
  'fillVisibilityChangeEventData', 'registerVisibilityChangeEventCallback',
  'registerTouchEventCallback', 'fillGamepadEventData',
  'registerGamepadEventCallback', 'registerBeforeUnloadEventCallback',
  'fillBatteryEventData', 'registerBatteryEventCallback',
  'setCanvasElementSize', 'getCanvasElementSize',
  // debugging / stack traces
  'jsStackTrace', 'getCallstack', 'convertPCtoSourceLocation',
  // wasi flags
  'wasiRightsToMuslOFlags', 'wasiOFlagsToMuslOFlags',
  // timing
  'safeSetTimeout', 'setImmediateWrapped', 'safeRequestAnimationFrame',
  'clearImmediateWrapped', 'registerPostMainLoop', 'registerPreMainLoop',
  // promise helpers
  'getPromise', 'makePromise', 'idsToPromises', 'makePromiseCallback',
  // exceptions
  'ExceptionInfo', 'findMatchingCatch',
  'Browser_asyncPrepareDataCounter', 'arraySum', 'addDays',
  // sockets / FS extras
  'getSocketFromFD', 'getSocketAddress', 'FS_mkdirTree',
  '_setNetworkCallback',
  // WebGL
  'heapObjectForWebGLType', 'toTypedArrayIndex',
  'webgl_enable_ANGLE_instanced_arrays', 'webgl_enable_OES_vertex_array_object',
  'webgl_enable_WEBGL_draw_buffers', 'webgl_enable_WEBGL_multi_draw',
  'webgl_enable_EXT_polygon_offset_clamp', 'webgl_enable_EXT_clip_control',
  'webgl_enable_WEBGL_polygon_mode', 'emscriptenWebGLGet',
  'computeUnpackAlignedImageSize', 'colorChannelsInGlTextureFormat',
  'emscriptenWebGLGetTexPixelData', 'emscriptenWebGLGetUniform',
  'webglGetUniformLocation', 'webglPrepareUniformLocationsBeforeFirstUse',
  'webglGetLeftBracePos', 'emscriptenWebGLGetVertexAttrib',
  '__glGetActiveAttribOrUniform', 'writeGLArray', 'registerWebGlEventCallback',
  'runAndAbortIfError',
  // deprecated allocation / string APIs
  'ALLOC_NORMAL', 'ALLOC_STACK', 'allocate',
  'writeStringToMemory', 'writeAsciiToMemory',
  'allocateUTF8', 'allocateUTF8OnStack',
  'demangle', 'stackTrace', 'getNativeTypeSize',
];
|
|
missingLibrarySymbols.forEach(missingLibrarySymbol)
|
|
|
|
// Internal runtime symbols that exist in this build but are deliberately
// not exported on Module. unexportedRuntimeSymbol() (invoked just below)
// installs a warning getter for each, telling users to export it with
// -sEXPORTED_RUNTIME_METHODS if they need it.
var unexportedSymbols = [
  // core runtime
  'run', 'out', 'err', 'callMain', 'abort', 'wasmExports',
  // heap views
  'HEAPF32', 'HEAPF64', 'HEAP8', 'HEAPU8', 'HEAP16', 'HEAPU16',
  'HEAP32', 'HEAPU32', 'HEAP64', 'HEAPU64',
  'writeStackCookie', 'checkStackCookie',
  'readI53FromI64', 'INT53_MAX', 'INT53_MIN', 'bigintToI53Checked',
  'ptrToString', 'zeroMemory', 'getHeapMax', 'growMemory',
  'ENV', 'setStackLimits', 'ERRNO_CODES', 'strError',
  'DNS', 'Protocols', 'Sockets', 'timers', 'warnOnce',
  'readEmAsmArgsArray', 'getExecutableName', 'asyncLoad',
  'alignMemory', 'mmapAlloc', 'wasmTable', 'wasmMemory',
  'getUniqueRunDependency', 'noExitRuntime',
  'addRunDependency', 'removeRunDependency', 'addOnPreRun', 'addOnPostRun',
  // call / table machinery
  'ccall', 'convertJsFunctionToWasm', 'freeTableIndexes',
  'functionsInTableMap', 'getEmptyTableSlot', 'updateTableMap',
  'getFunctionAddress', 'setValue', 'getValue',
  'PATH', 'PATH_FS',
  // string conversion
  'UTF8Decoder', 'UTF8ArrayToString', 'stringToUTF8Array', 'stringToUTF8',
  'lengthBytesUTF8', 'intArrayFromString', 'UTF16Decoder',
  'stringToUTF8OnStack',
  // browser events
  'JSEvents', 'specialHTMLTargets', 'findCanvasEventTarget',
  'currentFullscreenStrategy', 'restoreOldWindowedStyle',
  'UNWIND_CACHE', 'ExitStatus', 'getEnvStrings', 'checkWasiClock',
  'doReadv', 'doWritev', 'initRandomFill', 'randomFill',
  'emSetImmediate', 'emClearImmediate_deps', 'emClearImmediate',
  'promiseMap', 'uncaughtExceptionCount', 'exceptionLast', 'exceptionCaught',
  'Browser', 'requestFullscreen', 'requestFullScreen', 'setCanvasSize',
  'getUserMedia', 'createContext', 'getPreloadedImageData__data', 'wget',
  // date helpers
  'MONTH_DAYS_REGULAR', 'MONTH_DAYS_LEAP',
  'MONTH_DAYS_REGULAR_CUMULATIVE', 'MONTH_DAYS_LEAP_CUMULATIVE',
  'isLeapYear', 'ydayFromDate',
  'SYSCALLS', 'preloadPlugins',
  // filesystem API surface
  'FS_createPreloadedFile', 'FS_preloadFile', 'FS_modeStringToFlags',
  'FS_getMode', 'FS_stdin_getChar_buffer', 'FS_stdin_getChar',
  'FS_unlink', 'FS_createPath', 'FS_createDevice', 'FS_readFile',
  'FS', 'FS_root', 'FS_mounts', 'FS_devices', 'FS_streams',
  'FS_nextInode', 'FS_nameTable', 'FS_currentPath', 'FS_initialized',
  'FS_ignorePermissions', 'FS_filesystems', 'FS_syncFSRequests',
  'FS_readFiles', 'FS_lookupPath', 'FS_getPath', 'FS_hashName',
  'FS_hashAddNode', 'FS_hashRemoveNode', 'FS_lookupNode',
  'FS_createNode', 'FS_destroyNode', 'FS_isRoot', 'FS_isMountpoint',
  'FS_isFile', 'FS_isDir', 'FS_isLink', 'FS_isChrdev', 'FS_isBlkdev',
  'FS_isFIFO', 'FS_isSocket', 'FS_flagsToPermissionString',
  'FS_nodePermissions', 'FS_mayLookup', 'FS_mayCreate', 'FS_mayDelete',
  'FS_mayOpen', 'FS_checkOpExists', 'FS_nextfd', 'FS_getStreamChecked',
  'FS_getStream', 'FS_createStream', 'FS_closeStream', 'FS_dupStream',
  'FS_doSetAttr', 'FS_chrdev_stream_ops', 'FS_major', 'FS_minor',
  'FS_makedev', 'FS_registerDevice', 'FS_getDevice', 'FS_getMounts',
  'FS_syncfs', 'FS_mount', 'FS_unmount', 'FS_lookup', 'FS_mknod',
  'FS_statfs', 'FS_statfsStream', 'FS_statfsNode', 'FS_create',
  'FS_mkdir', 'FS_mkdev', 'FS_symlink', 'FS_rename', 'FS_rmdir',
  'FS_readdir', 'FS_readlink', 'FS_stat', 'FS_fstat', 'FS_lstat',
  'FS_doChmod', 'FS_chmod', 'FS_lchmod', 'FS_fchmod', 'FS_doChown',
  'FS_chown', 'FS_lchown', 'FS_fchown', 'FS_doTruncate', 'FS_truncate',
  'FS_ftruncate', 'FS_utime', 'FS_open', 'FS_close', 'FS_isClosed',
  'FS_llseek', 'FS_read', 'FS_write', 'FS_mmap', 'FS_msync', 'FS_ioctl',
  'FS_writeFile', 'FS_cwd', 'FS_chdir', 'FS_createDefaultDirectories',
  'FS_createDefaultDevices', 'FS_createSpecialDirectories',
  'FS_createStandardStreams', 'FS_staticInit', 'FS_init', 'FS_quit',
  'FS_findObject', 'FS_analyzePath', 'FS_createFile', 'FS_createDataFile',
  'FS_forceLoadFile', 'FS_createLazyFile', 'FS_absolutePath',
  'FS_createFolder', 'FS_createLink', 'FS_joinPath', 'FS_mmapAlloc',
  'FS_standardizePath',
  'MEMFS', 'TTY', 'PIPEFS', 'SOCKFS',
  // GL / media subsystems
  'tempFixedLengthArray', 'miniTempWebGLFloatBuffers',
  'miniTempWebGLIntBuffers', 'GL', 'AL', 'GLUT', 'EGL', 'GLEW',
  'IDBStore', 'SDL', 'SDL_gfx',
  'print', 'printErr', 'jstoi_s',
];
|
|
unexportedSymbols.forEach(unexportedRuntimeSymbol);
|
|
|
|
// End runtime exports
|
|
// Begin JS library exports
|
|
// End JS library exports
|
|
|
|
// end include: postlibrary.js
|
|
|
|
// Warn about incoming Module properties that this build ignores entirely.
function checkIncomingModuleAPI() {
  ignoredModuleProp('fetchSettings');
}
|
|
|
|
// Imports from the Wasm binary.
// Each export starts out as a makeInvalidEarlyAccess() stub that aborts if
// used before the wasm module has been instantiated; once instantiation
// completes, assignWasmExports() (below) replaces every stub with the real
// wasm function. Symbols also assigned onto Module are part of sql.js'
// public surface; the rest are internal to this glue code.
var _sqlite3_free = Module['_sqlite3_free'] = makeInvalidEarlyAccess('_sqlite3_free');
var _sqlite3_value_text = Module['_sqlite3_value_text'] = makeInvalidEarlyAccess('_sqlite3_value_text');
var _sqlite3_prepare_v2 = Module['_sqlite3_prepare_v2'] = makeInvalidEarlyAccess('_sqlite3_prepare_v2');
var _sqlite3_step = Module['_sqlite3_step'] = makeInvalidEarlyAccess('_sqlite3_step');
var _sqlite3_reset = Module['_sqlite3_reset'] = makeInvalidEarlyAccess('_sqlite3_reset');
var _sqlite3_exec = Module['_sqlite3_exec'] = makeInvalidEarlyAccess('_sqlite3_exec');
var _sqlite3_finalize = Module['_sqlite3_finalize'] = makeInvalidEarlyAccess('_sqlite3_finalize');
var _sqlite3_column_name = Module['_sqlite3_column_name'] = makeInvalidEarlyAccess('_sqlite3_column_name');
var _sqlite3_column_text = Module['_sqlite3_column_text'] = makeInvalidEarlyAccess('_sqlite3_column_text');
var _sqlite3_column_type = Module['_sqlite3_column_type'] = makeInvalidEarlyAccess('_sqlite3_column_type');
var _sqlite3_errmsg = Module['_sqlite3_errmsg'] = makeInvalidEarlyAccess('_sqlite3_errmsg');
var _sqlite3_clear_bindings = Module['_sqlite3_clear_bindings'] = makeInvalidEarlyAccess('_sqlite3_clear_bindings');
var _sqlite3_value_blob = Module['_sqlite3_value_blob'] = makeInvalidEarlyAccess('_sqlite3_value_blob');
var _sqlite3_value_bytes = Module['_sqlite3_value_bytes'] = makeInvalidEarlyAccess('_sqlite3_value_bytes');
var _sqlite3_value_double = Module['_sqlite3_value_double'] = makeInvalidEarlyAccess('_sqlite3_value_double');
var _sqlite3_value_int = Module['_sqlite3_value_int'] = makeInvalidEarlyAccess('_sqlite3_value_int');
var _sqlite3_value_type = Module['_sqlite3_value_type'] = makeInvalidEarlyAccess('_sqlite3_value_type');
var _sqlite3_result_blob = Module['_sqlite3_result_blob'] = makeInvalidEarlyAccess('_sqlite3_result_blob');
var _sqlite3_result_double = Module['_sqlite3_result_double'] = makeInvalidEarlyAccess('_sqlite3_result_double');
var _sqlite3_result_error = Module['_sqlite3_result_error'] = makeInvalidEarlyAccess('_sqlite3_result_error');
var _sqlite3_result_int = Module['_sqlite3_result_int'] = makeInvalidEarlyAccess('_sqlite3_result_int');
var _sqlite3_result_int64 = Module['_sqlite3_result_int64'] = makeInvalidEarlyAccess('_sqlite3_result_int64');
var _sqlite3_result_null = Module['_sqlite3_result_null'] = makeInvalidEarlyAccess('_sqlite3_result_null');
var _sqlite3_result_text = Module['_sqlite3_result_text'] = makeInvalidEarlyAccess('_sqlite3_result_text');
var _sqlite3_aggregate_context = Module['_sqlite3_aggregate_context'] = makeInvalidEarlyAccess('_sqlite3_aggregate_context');
var _sqlite3_column_count = Module['_sqlite3_column_count'] = makeInvalidEarlyAccess('_sqlite3_column_count');
var _sqlite3_data_count = Module['_sqlite3_data_count'] = makeInvalidEarlyAccess('_sqlite3_data_count');
var _sqlite3_column_blob = Module['_sqlite3_column_blob'] = makeInvalidEarlyAccess('_sqlite3_column_blob');
var _sqlite3_column_bytes = Module['_sqlite3_column_bytes'] = makeInvalidEarlyAccess('_sqlite3_column_bytes');
var _sqlite3_column_double = Module['_sqlite3_column_double'] = makeInvalidEarlyAccess('_sqlite3_column_double');
var _sqlite3_bind_blob = Module['_sqlite3_bind_blob'] = makeInvalidEarlyAccess('_sqlite3_bind_blob');
var _sqlite3_bind_double = Module['_sqlite3_bind_double'] = makeInvalidEarlyAccess('_sqlite3_bind_double');
var _sqlite3_bind_int = Module['_sqlite3_bind_int'] = makeInvalidEarlyAccess('_sqlite3_bind_int');
var _sqlite3_bind_text = Module['_sqlite3_bind_text'] = makeInvalidEarlyAccess('_sqlite3_bind_text');
var _sqlite3_bind_parameter_index = Module['_sqlite3_bind_parameter_index'] = makeInvalidEarlyAccess('_sqlite3_bind_parameter_index');
var _sqlite3_sql = Module['_sqlite3_sql'] = makeInvalidEarlyAccess('_sqlite3_sql');
var _sqlite3_normalized_sql = Module['_sqlite3_normalized_sql'] = makeInvalidEarlyAccess('_sqlite3_normalized_sql');
var _sqlite3_changes = Module['_sqlite3_changes'] = makeInvalidEarlyAccess('_sqlite3_changes');
var _sqlite3_close_v2 = Module['_sqlite3_close_v2'] = makeInvalidEarlyAccess('_sqlite3_close_v2');
var _sqlite3_create_function_v2 = Module['_sqlite3_create_function_v2'] = makeInvalidEarlyAccess('_sqlite3_create_function_v2');
var _sqlite3_update_hook = Module['_sqlite3_update_hook'] = makeInvalidEarlyAccess('_sqlite3_update_hook');
var _sqlite3_open = Module['_sqlite3_open'] = makeInvalidEarlyAccess('_sqlite3_open');
// The remaining exports are used internally by the runtime only.
var _strerror = makeInvalidEarlyAccess('_strerror');
var _malloc = Module['_malloc'] = makeInvalidEarlyAccess('_malloc');
var _free = Module['_free'] = makeInvalidEarlyAccess('_free');
var _RegisterExtensionFunctions = Module['_RegisterExtensionFunctions'] = makeInvalidEarlyAccess('_RegisterExtensionFunctions');
var _fflush = makeInvalidEarlyAccess('_fflush');
var _emscripten_stack_get_end = makeInvalidEarlyAccess('_emscripten_stack_get_end');
var _emscripten_stack_get_base = makeInvalidEarlyAccess('_emscripten_stack_get_base');
var _emscripten_builtin_memalign = makeInvalidEarlyAccess('_emscripten_builtin_memalign');
var _emscripten_stack_init = makeInvalidEarlyAccess('_emscripten_stack_init');
var _emscripten_stack_get_free = makeInvalidEarlyAccess('_emscripten_stack_get_free');
var __emscripten_stack_restore = makeInvalidEarlyAccess('__emscripten_stack_restore');
var __emscripten_stack_alloc = makeInvalidEarlyAccess('__emscripten_stack_alloc');
var _emscripten_stack_get_current = makeInvalidEarlyAccess('_emscripten_stack_get_current');
var ___set_stack_limits = Module['___set_stack_limits'] = makeInvalidEarlyAccess('___set_stack_limits');
var memory = makeInvalidEarlyAccess('memory');
var __indirect_function_table = makeInvalidEarlyAccess('__indirect_function_table');
var wasmMemory = makeInvalidEarlyAccess('wasmMemory');
var wasmTable = makeInvalidEarlyAccess('wasmTable');
|
|
|
|
// Replaces the makeInvalidEarlyAccess() stubs declared above with the real
// Wasm exports once the module instance is available.
// @param {Object} wasmExports - the `exports` object of the instantiated Wasm module.
// The second argument to createExportWrapper is the export's argument count;
// the wrapper adds runtime checks (this is an ASSERTIONS build) around the raw
// export. A handful of stack helpers at the bottom are assigned raw, without
// the wrapper.
function assignWasmExports(wasmExports) {
  // Fail fast with a descriptive message if any expected export is missing,
  // rather than dying later with an opaque "not a function" error.
  assert(typeof wasmExports['sqlite3_free'] != 'undefined', 'missing Wasm export: sqlite3_free');
  assert(typeof wasmExports['sqlite3_value_text'] != 'undefined', 'missing Wasm export: sqlite3_value_text');
  assert(typeof wasmExports['sqlite3_prepare_v2'] != 'undefined', 'missing Wasm export: sqlite3_prepare_v2');
  assert(typeof wasmExports['sqlite3_step'] != 'undefined', 'missing Wasm export: sqlite3_step');
  assert(typeof wasmExports['sqlite3_reset'] != 'undefined', 'missing Wasm export: sqlite3_reset');
  assert(typeof wasmExports['sqlite3_exec'] != 'undefined', 'missing Wasm export: sqlite3_exec');
  assert(typeof wasmExports['sqlite3_finalize'] != 'undefined', 'missing Wasm export: sqlite3_finalize');
  assert(typeof wasmExports['sqlite3_column_name'] != 'undefined', 'missing Wasm export: sqlite3_column_name');
  assert(typeof wasmExports['sqlite3_column_text'] != 'undefined', 'missing Wasm export: sqlite3_column_text');
  assert(typeof wasmExports['sqlite3_column_type'] != 'undefined', 'missing Wasm export: sqlite3_column_type');
  assert(typeof wasmExports['sqlite3_errmsg'] != 'undefined', 'missing Wasm export: sqlite3_errmsg');
  assert(typeof wasmExports['sqlite3_clear_bindings'] != 'undefined', 'missing Wasm export: sqlite3_clear_bindings');
  assert(typeof wasmExports['sqlite3_value_blob'] != 'undefined', 'missing Wasm export: sqlite3_value_blob');
  assert(typeof wasmExports['sqlite3_value_bytes'] != 'undefined', 'missing Wasm export: sqlite3_value_bytes');
  assert(typeof wasmExports['sqlite3_value_double'] != 'undefined', 'missing Wasm export: sqlite3_value_double');
  assert(typeof wasmExports['sqlite3_value_int'] != 'undefined', 'missing Wasm export: sqlite3_value_int');
  assert(typeof wasmExports['sqlite3_value_type'] != 'undefined', 'missing Wasm export: sqlite3_value_type');
  assert(typeof wasmExports['sqlite3_result_blob'] != 'undefined', 'missing Wasm export: sqlite3_result_blob');
  assert(typeof wasmExports['sqlite3_result_double'] != 'undefined', 'missing Wasm export: sqlite3_result_double');
  assert(typeof wasmExports['sqlite3_result_error'] != 'undefined', 'missing Wasm export: sqlite3_result_error');
  assert(typeof wasmExports['sqlite3_result_int'] != 'undefined', 'missing Wasm export: sqlite3_result_int');
  assert(typeof wasmExports['sqlite3_result_int64'] != 'undefined', 'missing Wasm export: sqlite3_result_int64');
  assert(typeof wasmExports['sqlite3_result_null'] != 'undefined', 'missing Wasm export: sqlite3_result_null');
  assert(typeof wasmExports['sqlite3_result_text'] != 'undefined', 'missing Wasm export: sqlite3_result_text');
  assert(typeof wasmExports['sqlite3_aggregate_context'] != 'undefined', 'missing Wasm export: sqlite3_aggregate_context');
  assert(typeof wasmExports['sqlite3_column_count'] != 'undefined', 'missing Wasm export: sqlite3_column_count');
  assert(typeof wasmExports['sqlite3_data_count'] != 'undefined', 'missing Wasm export: sqlite3_data_count');
  assert(typeof wasmExports['sqlite3_column_blob'] != 'undefined', 'missing Wasm export: sqlite3_column_blob');
  assert(typeof wasmExports['sqlite3_column_bytes'] != 'undefined', 'missing Wasm export: sqlite3_column_bytes');
  assert(typeof wasmExports['sqlite3_column_double'] != 'undefined', 'missing Wasm export: sqlite3_column_double');
  assert(typeof wasmExports['sqlite3_bind_blob'] != 'undefined', 'missing Wasm export: sqlite3_bind_blob');
  assert(typeof wasmExports['sqlite3_bind_double'] != 'undefined', 'missing Wasm export: sqlite3_bind_double');
  assert(typeof wasmExports['sqlite3_bind_int'] != 'undefined', 'missing Wasm export: sqlite3_bind_int');
  assert(typeof wasmExports['sqlite3_bind_text'] != 'undefined', 'missing Wasm export: sqlite3_bind_text');
  assert(typeof wasmExports['sqlite3_bind_parameter_index'] != 'undefined', 'missing Wasm export: sqlite3_bind_parameter_index');
  assert(typeof wasmExports['sqlite3_sql'] != 'undefined', 'missing Wasm export: sqlite3_sql');
  assert(typeof wasmExports['sqlite3_normalized_sql'] != 'undefined', 'missing Wasm export: sqlite3_normalized_sql');
  assert(typeof wasmExports['sqlite3_changes'] != 'undefined', 'missing Wasm export: sqlite3_changes');
  assert(typeof wasmExports['sqlite3_close_v2'] != 'undefined', 'missing Wasm export: sqlite3_close_v2');
  assert(typeof wasmExports['sqlite3_create_function_v2'] != 'undefined', 'missing Wasm export: sqlite3_create_function_v2');
  assert(typeof wasmExports['sqlite3_update_hook'] != 'undefined', 'missing Wasm export: sqlite3_update_hook');
  assert(typeof wasmExports['sqlite3_open'] != 'undefined', 'missing Wasm export: sqlite3_open');
  assert(typeof wasmExports['strerror'] != 'undefined', 'missing Wasm export: strerror');
  assert(typeof wasmExports['malloc'] != 'undefined', 'missing Wasm export: malloc');
  assert(typeof wasmExports['free'] != 'undefined', 'missing Wasm export: free');
  assert(typeof wasmExports['RegisterExtensionFunctions'] != 'undefined', 'missing Wasm export: RegisterExtensionFunctions');
  assert(typeof wasmExports['fflush'] != 'undefined', 'missing Wasm export: fflush');
  assert(typeof wasmExports['emscripten_stack_get_end'] != 'undefined', 'missing Wasm export: emscripten_stack_get_end');
  assert(typeof wasmExports['emscripten_stack_get_base'] != 'undefined', 'missing Wasm export: emscripten_stack_get_base');
  assert(typeof wasmExports['emscripten_builtin_memalign'] != 'undefined', 'missing Wasm export: emscripten_builtin_memalign');
  assert(typeof wasmExports['emscripten_stack_init'] != 'undefined', 'missing Wasm export: emscripten_stack_init');
  assert(typeof wasmExports['emscripten_stack_get_free'] != 'undefined', 'missing Wasm export: emscripten_stack_get_free');
  assert(typeof wasmExports['_emscripten_stack_restore'] != 'undefined', 'missing Wasm export: _emscripten_stack_restore');
  assert(typeof wasmExports['_emscripten_stack_alloc'] != 'undefined', 'missing Wasm export: _emscripten_stack_alloc');
  assert(typeof wasmExports['emscripten_stack_get_current'] != 'undefined', 'missing Wasm export: emscripten_stack_get_current');
  assert(typeof wasmExports['__set_stack_limits'] != 'undefined', 'missing Wasm export: __set_stack_limits');
  assert(typeof wasmExports['memory'] != 'undefined', 'missing Wasm export: memory');
  assert(typeof wasmExports['__indirect_function_table'] != 'undefined', 'missing Wasm export: __indirect_function_table');
  // Overwrite the early-access stubs (and their Module aliases) with checked
  // wrappers; the numeric literal is each export's expected argument count.
  _sqlite3_free = Module['_sqlite3_free'] = createExportWrapper('sqlite3_free', 1);
  _sqlite3_value_text = Module['_sqlite3_value_text'] = createExportWrapper('sqlite3_value_text', 1);
  _sqlite3_prepare_v2 = Module['_sqlite3_prepare_v2'] = createExportWrapper('sqlite3_prepare_v2', 5);
  _sqlite3_step = Module['_sqlite3_step'] = createExportWrapper('sqlite3_step', 1);
  _sqlite3_reset = Module['_sqlite3_reset'] = createExportWrapper('sqlite3_reset', 1);
  _sqlite3_exec = Module['_sqlite3_exec'] = createExportWrapper('sqlite3_exec', 5);
  _sqlite3_finalize = Module['_sqlite3_finalize'] = createExportWrapper('sqlite3_finalize', 1);
  _sqlite3_column_name = Module['_sqlite3_column_name'] = createExportWrapper('sqlite3_column_name', 2);
  _sqlite3_column_text = Module['_sqlite3_column_text'] = createExportWrapper('sqlite3_column_text', 2);
  _sqlite3_column_type = Module['_sqlite3_column_type'] = createExportWrapper('sqlite3_column_type', 2);
  _sqlite3_errmsg = Module['_sqlite3_errmsg'] = createExportWrapper('sqlite3_errmsg', 1);
  _sqlite3_clear_bindings = Module['_sqlite3_clear_bindings'] = createExportWrapper('sqlite3_clear_bindings', 1);
  _sqlite3_value_blob = Module['_sqlite3_value_blob'] = createExportWrapper('sqlite3_value_blob', 1);
  _sqlite3_value_bytes = Module['_sqlite3_value_bytes'] = createExportWrapper('sqlite3_value_bytes', 1);
  _sqlite3_value_double = Module['_sqlite3_value_double'] = createExportWrapper('sqlite3_value_double', 1);
  _sqlite3_value_int = Module['_sqlite3_value_int'] = createExportWrapper('sqlite3_value_int', 1);
  _sqlite3_value_type = Module['_sqlite3_value_type'] = createExportWrapper('sqlite3_value_type', 1);
  _sqlite3_result_blob = Module['_sqlite3_result_blob'] = createExportWrapper('sqlite3_result_blob', 4);
  _sqlite3_result_double = Module['_sqlite3_result_double'] = createExportWrapper('sqlite3_result_double', 2);
  _sqlite3_result_error = Module['_sqlite3_result_error'] = createExportWrapper('sqlite3_result_error', 3);
  _sqlite3_result_int = Module['_sqlite3_result_int'] = createExportWrapper('sqlite3_result_int', 2);
  _sqlite3_result_int64 = Module['_sqlite3_result_int64'] = createExportWrapper('sqlite3_result_int64', 2);
  _sqlite3_result_null = Module['_sqlite3_result_null'] = createExportWrapper('sqlite3_result_null', 1);
  _sqlite3_result_text = Module['_sqlite3_result_text'] = createExportWrapper('sqlite3_result_text', 4);
  _sqlite3_aggregate_context = Module['_sqlite3_aggregate_context'] = createExportWrapper('sqlite3_aggregate_context', 2);
  _sqlite3_column_count = Module['_sqlite3_column_count'] = createExportWrapper('sqlite3_column_count', 1);
  _sqlite3_data_count = Module['_sqlite3_data_count'] = createExportWrapper('sqlite3_data_count', 1);
  _sqlite3_column_blob = Module['_sqlite3_column_blob'] = createExportWrapper('sqlite3_column_blob', 2);
  _sqlite3_column_bytes = Module['_sqlite3_column_bytes'] = createExportWrapper('sqlite3_column_bytes', 2);
  _sqlite3_column_double = Module['_sqlite3_column_double'] = createExportWrapper('sqlite3_column_double', 2);
  _sqlite3_bind_blob = Module['_sqlite3_bind_blob'] = createExportWrapper('sqlite3_bind_blob', 5);
  _sqlite3_bind_double = Module['_sqlite3_bind_double'] = createExportWrapper('sqlite3_bind_double', 3);
  _sqlite3_bind_int = Module['_sqlite3_bind_int'] = createExportWrapper('sqlite3_bind_int', 3);
  _sqlite3_bind_text = Module['_sqlite3_bind_text'] = createExportWrapper('sqlite3_bind_text', 5);
  _sqlite3_bind_parameter_index = Module['_sqlite3_bind_parameter_index'] = createExportWrapper('sqlite3_bind_parameter_index', 2);
  _sqlite3_sql = Module['_sqlite3_sql'] = createExportWrapper('sqlite3_sql', 1);
  _sqlite3_normalized_sql = Module['_sqlite3_normalized_sql'] = createExportWrapper('sqlite3_normalized_sql', 1);
  _sqlite3_changes = Module['_sqlite3_changes'] = createExportWrapper('sqlite3_changes', 1);
  _sqlite3_close_v2 = Module['_sqlite3_close_v2'] = createExportWrapper('sqlite3_close_v2', 1);
  _sqlite3_create_function_v2 = Module['_sqlite3_create_function_v2'] = createExportWrapper('sqlite3_create_function_v2', 9);
  _sqlite3_update_hook = Module['_sqlite3_update_hook'] = createExportWrapper('sqlite3_update_hook', 3);
  _sqlite3_open = Module['_sqlite3_open'] = createExportWrapper('sqlite3_open', 2);
  _strerror = createExportWrapper('strerror', 1);
  _malloc = Module['_malloc'] = createExportWrapper('malloc', 1);
  _free = Module['_free'] = createExportWrapper('free', 1);
  _RegisterExtensionFunctions = Module['_RegisterExtensionFunctions'] = createExportWrapper('RegisterExtensionFunctions', 1);
  _fflush = createExportWrapper('fflush', 1);
  // These stack helpers are assigned raw (no createExportWrapper) — note they
  // are used during early startup / stack-cookie handling, presumably before
  // the wrapper machinery may run safely.
  _emscripten_stack_get_end = wasmExports['emscripten_stack_get_end'];
  _emscripten_stack_get_base = wasmExports['emscripten_stack_get_base'];
  _emscripten_builtin_memalign = createExportWrapper('emscripten_builtin_memalign', 2);
  _emscripten_stack_init = wasmExports['emscripten_stack_init'];
  _emscripten_stack_get_free = wasmExports['emscripten_stack_get_free'];
  __emscripten_stack_restore = wasmExports['_emscripten_stack_restore'];
  __emscripten_stack_alloc = wasmExports['_emscripten_stack_alloc'];
  _emscripten_stack_get_current = wasmExports['emscripten_stack_get_current'];
  ___set_stack_limits = Module['___set_stack_limits'] = createExportWrapper('__set_stack_limits', 2);
  // Linear memory and the indirect function table are aliased to the names
  // the rest of the runtime expects (wasmMemory / wasmTable).
  memory = wasmMemory = wasmExports['memory'];
  __indirect_function_table = wasmTable = wasmExports['__indirect_function_table'];
}
|
|
|
|
// The JS functions made available to the Wasm module at instantiation time
// (its import object). Keys are the import names the Wasm binary expects;
// values are the JS implementations defined earlier in this file.
// The @export annotations keep Closure Compiler from renaming the keys.
var wasmImports = {
  /** @export */
  __assert_fail: ___assert_fail,
  /** @export */
  __handle_stack_overflow: ___handle_stack_overflow,
  /** @export */
  __syscall_chmod: ___syscall_chmod,
  /** @export */
  __syscall_faccessat: ___syscall_faccessat,
  /** @export */
  __syscall_fchmod: ___syscall_fchmod,
  /** @export */
  __syscall_fchown32: ___syscall_fchown32,
  /** @export */
  __syscall_fcntl64: ___syscall_fcntl64,
  /** @export */
  __syscall_fstat64: ___syscall_fstat64,
  /** @export */
  __syscall_ftruncate64: ___syscall_ftruncate64,
  /** @export */
  __syscall_getcwd: ___syscall_getcwd,
  /** @export */
  __syscall_lstat64: ___syscall_lstat64,
  /** @export */
  __syscall_mkdirat: ___syscall_mkdirat,
  /** @export */
  __syscall_newfstatat: ___syscall_newfstatat,
  /** @export */
  __syscall_openat: ___syscall_openat,
  /** @export */
  __syscall_readlinkat: ___syscall_readlinkat,
  /** @export */
  __syscall_rmdir: ___syscall_rmdir,
  /** @export */
  __syscall_stat64: ___syscall_stat64,
  /** @export */
  __syscall_unlinkat: ___syscall_unlinkat,
  /** @export */
  __syscall_utimensat: ___syscall_utimensat,
  /** @export */
  _abort_js: __abort_js,
  /** @export */
  _localtime_js: __localtime_js,
  /** @export */
  _mmap_js: __mmap_js,
  /** @export */
  _munmap_js: __munmap_js,
  /** @export */
  _tzset_js: __tzset_js,
  /** @export */
  clock_time_get: _clock_time_get,
  /** @export */
  emscripten_date_now: _emscripten_date_now,
  /** @export */
  emscripten_get_heap_max: _emscripten_get_heap_max,
  /** @export */
  emscripten_get_now: _emscripten_get_now,
  /** @export */
  emscripten_resize_heap: _emscripten_resize_heap,
  /** @export */
  environ_get: _environ_get,
  /** @export */
  environ_sizes_get: _environ_sizes_get,
  /** @export */
  fd_close: _fd_close,
  /** @export */
  fd_fdstat_get: _fd_fdstat_get,
  /** @export */
  fd_read: _fd_read,
  /** @export */
  fd_seek: _fd_seek,
  /** @export */
  fd_sync: _fd_sync,
  /** @export */
  fd_write: _fd_write
};
|
|
|
|
|
|
// include: postamble.js
|
|
// === Auto-generated postamble setup entry stuff ===
|
|
|
|
// Set to true once doRun() has started the runtime; guards double-starts.
var calledRun;
|
|
|
|
// Initializes the stack bounds and writes the stack-overflow canary.
// Must run before any Wasm code (including static constructors) executes.
function stackCheckInit() {
  // This is normally called automatically during __wasm_call_ctors but need to
  // get these values before even running any of the ctors so we call it redundantly
  // here.
  _emscripten_stack_init();
  // TODO(sbc): Move writeStackCookie to native to avoid this.
  writeStackCookie();
}
|
|
|
|
// Starts the runtime: waits for async dependencies (e.g. the Wasm fetch),
// runs preRun hooks, initializes the runtime, fires onRuntimeInitialized,
// then runs postRun hooks. May return early and be re-invoked later via
// dependenciesFulfilled when pending dependencies remain.
function run() {
  // Defer until all outstanding async dependencies have resolved.
  if (runDependencies > 0) {
    dependenciesFulfilled = run;
    return;
  }

  stackCheckInit();

  preRun();

  // a preRun added a dependency, run will be called later
  if (runDependencies > 0) {
    dependenciesFulfilled = run;
    return;
  }

  // The actual startup sequence, factored out so it can be deferred behind
  // the optional Module['setStatus'] UI update below.
  function doRun() {
    // run may have just been called through dependencies being fulfilled just in this very frame,
    // or while the async setStatus time below was happening
    assert(!calledRun);
    calledRun = true;
    Module['calledRun'] = true;

    // Something already aborted the runtime; don't try to start it.
    if (ABORT) return;

    initRuntime();

    Module['onRuntimeInitialized']?.();
    consumedModuleProp('onRuntimeInitialized');

    assert(!Module['_main'], 'compiled without a main, but one is present. if you added it from JS, use Module["onRuntimeInitialized"]');

    postRun();
  }

  if (Module['setStatus']) {
    // Give the UI a chance to repaint the status message before starting.
    Module['setStatus']('Running...');
    setTimeout(() => {
      setTimeout(() => Module['setStatus'](''), 1);
      doRun();
    }, 1);
  } else
  {
    doRun();
  }
  // Verify the stack canary written in stackCheckInit() is still intact.
  checkStackCookie();
}
|
|
|
|
// ASSERTIONS-only diagnostic: warns if stdout/stderr still hold unflushed
// content at shutdown (which a non-ASSERTIONS build would silently drop).
// Temporarily swaps out/err for capture hooks, flushes, inspects the TTY
// buffers, then restores out/err and warns if anything was pending.
function checkUnflushedContent() {
  // Compiler settings do not allow exiting the runtime, so flushing
  // the streams is not possible. but in ASSERTIONS mode we check
  // if there was something to flush, and if so tell the user they
  // should request that the runtime be exitable.
  // Normally we would not even include flush() at all, but in ASSERTIONS
  // builds we do so just for this check, and here we see if there is any
  // content to flush, that is, we check if there would have been
  // something a non-ASSERTIONS build would have not seen.
  // How we flush the streams depends on whether we are in SYSCALLS_REQUIRE_FILESYSTEM=0
  // mode (which has its own special function for this; otherwise, all
  // the code is inside libc)
  var oldOut = out;
  var oldErr = err;
  var has = false;
  out = err = (x) => {
    has = true;
  }
  try { // it doesn't matter if it fails
    _fflush(0);
    // also flush in the JS FS layer
    for (var name of ['stdout', 'stderr']) {
      var info = FS.analyzePath('/dev/' + name);
      // Skip devices that don't exist. This must be `continue`, not `return`:
      // a `return` here would exit the whole function from inside the try
      // block, leaving the no-op capture hooks installed as out/err and
      // silently swallowing all subsequent output. (The upstream Emscripten
      // code used `forEach`, where `return` meant "next element".)
      if (!info) continue;
      var stream = info.object;
      var rdev = stream.rdev;
      var tty = TTY.ttys[rdev];
      if (tty?.output?.length) {
        has = true;
      }
    }
  } catch(e) {}
  // Always restore the real output handlers before possibly warning.
  out = oldOut;
  err = oldErr;
  if (has) {
    warnOnce('stdio streams had content in them that was not flushed. you should set EXIT_RUNTIME to 1 (see the Emscripten FAQ), or make sure to emit a newline when you printf etc.');
  }
}
|
|
|
|
// Populated by assignWasmExports() once the Wasm instance is received.
var wasmExports;

// With async instantiation wasmExports is assigned asynchronously when the
// instance is received.
createWasm();

// Kick off the runtime; run() defers itself if createWasm() is still pending.
run();
|
|
|
|
// end include: postamble.js
|
|
|
|
|
|
|
|
// The shell-pre.js and emcc-generated code goes above
|
|
return Module;
|
|
}); // The end of the promise being returned
|
|
|
|
return initSqlJsPromise;
|
|
} // The end of our initSqlJs function
|
|
|
|
// This bit below is copied almost exactly from what you get when you use the MODULARIZE=1 flag with emcc
// However, we don't want to use the emcc modularization. See shell-pre.js
// UMD-style export: CommonJS first, then AMD, then a bare `exports` object.
if (typeof exports === 'object' && typeof module === 'object'){
    module.exports = initSqlJs;
    // This will allow the module to be used in ES6 or CommonJS
    module.exports.default = initSqlJs;
}
else if (typeof define === 'function' && define['amd']) {
    define([], function() { return initSqlJs; });
}
else if (typeof exports === 'object'){
    exports["Module"] = initSqlJs;
}
|