/*
  2022-09-16

  The author disclaims copyright to this source code.  In place of a
  legal notice, here is a blessing:

  *   May you do good and not evil.
  *   May you find forgiveness for yourself and forgive others.
  *   May you share freely, never taking more than you give.

  ***********************************************************************

  A Worker which manages asynchronous OPFS handles on behalf of a
  synchronous API which controls it via a combination of Worker
  messages, SharedArrayBuffer, and Atomics. It is the asynchronous
  counterpart of the API defined in sqlite3-vfs-opfs.js.

  Highly indebted to:

  https://github.com/rhashimoto/wa-sqlite/blob/master/src/examples/OriginPrivateFileSystemVFS.js

  for demonstrating how to use the OPFS APIs.

  This file is to be loaded as a Worker. It does not have any direct
  access to the sqlite3 JS/WASM bits, so any bits which it needs (most
  notably SQLITE_xxx integer codes) have to be imported into it via an
  initialization process.

  This file represents an implementation detail of a larger piece of
  code, and not a public interface. Its details may change at any time
  and are not intended to be used by any client-level code.

  2022-11-27: Chrome v108 changes some async methods to synchronous, as
  documented at:

  https://developer.chrome.com/blog/sync-methods-for-accesshandles/

  Firefox v111 and Safari 16.4, both released in March 2023, also
  include this.

  We cannot change to the sync forms at this point without breaking
  clients who use Chrome v104-ish or higher. truncate(), getSize(),
  flush(), and close() are now (as of v108) synchronous. Calling them
  with an "await", as we have to for the async forms, is still legal
  with the sync forms but is superfluous. Calling the async forms with
  theFunc().then(...) is not compatible with the change to synchronous,
  but we do not use those APIs that way, i.e. we don't _need_ to change
  anything for this, but at some point (after Chrome versions
  (approximately) 104-107 are extinct) we should change our usage of
  those methods to remove the "await".
*/
"use strict";
const wPost = (type,...args)=>postMessage({type, payload:args});
const installAsyncProxy = function(self){
  const toss = function(...args){throw new Error(args.join(' '))};
  if( globalThis.window === globalThis ){
    toss("This code cannot run from the main thread.",
         "Load it as a Worker from a separate Worker.");
  }else if(!navigator?.storage?.getDirectory){
    toss("This API requires navigator.storage.getDirectory.");
  }

  /**
     Will hold state copied to this object from the synchronous side of
     this API.
  */
  const state = Object.create(null);
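
  /**
     A sketch, for reference, of the fields which the synchronous side
     is expected to copy into this object via the 'opfs-async-init'
     message. This list is derived from the usages in this file and is
     illustrative, not normative:

       state.sabOP               // SharedArrayBuffer for op IDs and result codes
       state.sabIO               // SharedArrayBuffer for file I/O and s11n data
       state.fileBufferSize      // byte length of the I/O block within sabIO
       state.sabS11nOffset       // offset of the serialization block within sabIO
       state.sabS11nSize         // byte length of the serialization block
       state.opIds               // map of operation names to sabOP slot indexes
       state.sq3Codes            // SQLITE_xxx integer codes
       state.opfsFlags           // OPFS_xxx flags, e.g. OPFS_UNLOCK_ASAP
       state.asyncIdleWaitTime   // ms to wait per idle waitLoop() iteration
       state.asyncS11nExceptions // verbosity threshold for storeException()
       state.littleEndian        // endianness used by the DataView-based s11n
       state.verbose             // logging verbosity (see below)
  */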

  /**
     verbose:

     0 = no logging output
     1 = only errors
     2 = warnings and errors
     3 = debug, warnings, and errors
  */
  state.verbose = 1;

  const loggers = {
    0: console.error.bind(console),
    1: console.warn.bind(console),
    2: console.log.bind(console)
  };
  const logImpl = (level,...args)=>{
    if(state.verbose>level) loggers[level]("OPFS asyncer:",...args);
  };
  const log =    (...args)=>logImpl(2, ...args);
  const warn =   (...args)=>logImpl(1, ...args);
  const error =  (...args)=>logImpl(0, ...args);
  const metrics = Object.create(null);
  metrics.reset = ()=>{
    let k;
    const r = (m)=>(m.count = m.time = m.wait = 0);
    for(k in state.opIds){
      r(metrics[k] = Object.create(null));
    }
    let s = metrics.s11n = Object.create(null);
    s = s.serialize = Object.create(null);
    s.count = s.time = 0;
    s = metrics.s11n.deserialize = Object.create(null);
    s.count = s.time = 0;
  };
  metrics.dump = ()=>{
    let k, n = 0, t = 0, w = 0;
    for(k in state.opIds){
      const m = metrics[k];
      n += m.count;
      t += m.time;
      w += m.wait;
      m.avgTime = (m.count && m.time) ? (m.time / m.count) : 0;
    }
    console.log(globalThis?.location?.href,
                "metrics for",globalThis?.location?.href,":\n",
                metrics,
                "\nTotal of",n,"op(s) for",t,"ms",
                "approx",w,"ms spent waiting on OPFS APIs.");
    console.log("Serialization metrics:",metrics.s11n);
  };

  /**
     __openFiles is a map of sqlite3_file pointers (integers) to
     metadata related to a given OPFS file handle. The pointers are, in
     this side of the interface, opaque file handle IDs provided by the
     synchronous part of this constellation. Each value is an object
     with a structure demonstrated in the xOpen() impl.
  */
  const __openFiles = Object.create(null);
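  /**
     For reference, a sketch of the per-file entry stored in
     __openFiles, as assembled by xOpen() (with fields added later by
     getSyncHandle() and xLock()). Illustrative only:

     {
       fid: <sqlite3_file pointer/ID>,
       filenameAbs: <absolute filename>,
       filenamePart: <filename minus its directory part>,
       dirHandle: <FileSystemDirectoryHandle of the containing dir>,
       fileHandle: <FileSystemFileHandle>,
       sabView: <Uint8Array view of the shared I/O buffer>,
       readOnly: <boolean>,
       deleteOnClose: <boolean>,
       releaseImplicitLocks: <truthy if OPFS_UNLOCK_ASAP applies>,
       syncHandle: <FileSystemSyncAccessHandle, added lazily>,
       xLock: <current lock state, set by xLock()/xOpen()>
     }
  */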
  /**
     __implicitLocks is a Set of sqlite3_file pointers (integers) which were
     "auto-locked", i.e. those for which we obtained a sync access
     handle without an explicit xLock() call. Such locks will be
     released during db connection idle time, whereas a sync access
     handle obtained via xLock(), or subsequently xLock()'d after
     auto-acquisition, will not be released until xUnlock() is called.

     Maintenance reminder: if we relinquish auto-locks at the end of the
     operation which acquires them, we pay a massive performance
     penalty: speedtest1 benchmarks take up to 4x as long. By delaying
     the lock release until idle time, the hit is negligible.
  */
  const __implicitLocks = new Set();

  /**
     Expects an OPFS file path. It gets resolved, such that ".."
     components are properly expanded, and returned. If the 2nd arg is
     true, the result is returned as an array of path elements, else an
     absolute path string is returned.
  */
  const getResolvedPath = function(filename, splitIt){
    const p = new URL(
      filename, 'file://irrelevant'
    ).pathname;
    return splitIt ? p.split('/').filter((v)=>!!v) : p;
  };
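  /**
     For example (illustrative only):

       getResolvedPath('/foo/../bar/my.db')       // ==> '/bar/my.db'
       getResolvedPath('/foo/../bar/my.db', true) // ==> ['bar', 'my.db']
  */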
  /**
     Takes the absolute path to a filesystem element. Returns an array
     of [handleOfContainingDir, filename]. If the 2nd argument is truthy
     then each directory element leading to the file is created along
     the way. Throws if any creation or resolution fails.
  */
  const getDirForFilename = async function f(absFilename, createDirs = false){
    const path = getResolvedPath(absFilename, true);
    const filename = path.pop();
    let dh = state.rootDir;
    for(const dirName of path){
      if(dirName){
        dh = await dh.getDirectoryHandle(dirName, {create: !!createDirs});
      }
    }
    return [dh, filename];
  };
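  /**
     For example (illustrative only), assuming state.rootDir is the OPFS
     root: getDirForFilename('/foo/bar/my.db', true) creates the
     directories foo and foo/bar if needed and resolves to
     [<handle of /foo/bar>, 'my.db'].
  */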
  /**
     If the given file-holding object has a sync handle attached to it,
     that handle is removed and asynchronously closed. Though it may
     sound sensible to continue work as soon as the close() returns
     (noting that it's asynchronous), doing so can cause operations
     performed soon afterwards, e.g. a call to getSyncHandle(), to fail
     because they may happen out of order from the close(). OPFS does
     not guarantee that the actual order of operations is retained in
     such cases, i.e. always "await" on the result of this function.
  */
  const closeSyncHandle = async (fh)=>{
    if(fh.syncHandle){
      log("Closing sync handle for",fh.filenameAbs);
      const h = fh.syncHandle;
      delete fh.syncHandle;
      delete fh.xLock;
      __implicitLocks.delete(fh.fid);
      return h.close();
    }
  };

  /**
     A proxy for closeSyncHandle() which is guaranteed to not throw.

     This function is part of a lock/unlock step in functions which
     require a sync access handle but may be called without xLock()
     having been called first. Such calls need to release that
     handle to avoid locking the file for all of time. This is an
     _attempt_ at reducing cross-tab contention but it may prove
     to be more of a problem than a solution and may need to be
     removed.
  */
  const closeSyncHandleNoThrow = async (fh)=>{
    try{ await closeSyncHandle(fh) }
    catch(e){
      warn("closeSyncHandleNoThrow() ignoring:",e,fh);
    }
  };

  /* Release all auto-locks. */
  const releaseImplicitLocks = async ()=>{
    if(__implicitLocks.size){
      /* Release all auto-locks. */
      for(const fid of __implicitLocks){
        const fh = __openFiles[fid];
        await closeSyncHandleNoThrow(fh);
        log("Auto-unlocked",fid,fh.filenameAbs);
      }
    }
  };

  /**
     An experiment in improving concurrency by freeing up implicit locks
     sooner. This is known to impact performance dramatically but it has
     also been shown to improve concurrency considerably.

     If fh.releaseImplicitLocks is truthy and fh is in __implicitLocks,
     this routine returns closeSyncHandleNoThrow(), else it is a no-op.
  */
  const releaseImplicitLock = async (fh)=>{
    if(fh.releaseImplicitLocks && __implicitLocks.has(fh.fid)){
      return closeSyncHandleNoThrow(fh);
    }
  };

  /**
     An error class specifically for use with getSyncHandle(), the goal
     of which is to eventually be able to distinguish unambiguously
     between locking-related failures and other types, noting that we
     cannot currently do so because createSyncAccessHandle() does not
     define its exceptions in the required level of detail.

     2022-11-29: according to:

     https://github.com/whatwg/fs/pull/21

     NoModificationAllowedError will be the standard exception thrown
     when acquisition of a sync access handle fails due to a locking
     error. As of this writing, that error type is not visible in the
     dev console in Chrome v109, nor is it documented in MDN, but an
     error with that "name" property is being thrown from the OPFS
     layer.
  */
  class GetSyncHandleError extends Error {
    constructor(errorObject, ...msg){
      super([
        ...msg, ': '+errorObject.name+':',
        errorObject.message
      ].join(' '), {
        cause: errorObject
      });
      this.name = 'GetSyncHandleError';
    }
  };
  GetSyncHandleError.convertRc = (e,rc)=>{
    if(1){
      return (
        e instanceof GetSyncHandleError
        && ((e.cause.name==='NoModificationAllowedError')
            /* Inconsistent exception.name from Chrome/ium with the
               same exception.message text: */
            || (e.cause.name==='DOMException'
                && 0===e.cause.message.indexOf('Access Handles cannot')))
      ) ? (
        /*console.warn("SQLITE_BUSY",e),*/
        state.sq3Codes.SQLITE_BUSY
      ) : rc;
    }else{
      return rc;
    }
  };
  /**
     Returns the sync access handle associated with the given file
     handle object (which must be a valid handle object, as created by
     xOpen()), lazily opening it if needed.

     In order to help alleviate cross-tab contention for a database, if
     an exception is thrown while acquiring the handle, this routine
     will wait briefly and try again, up to some fixed number of
     times. If acquisition still fails at that point it will give up
     and propagate the exception. Client-level code will see that as
     an I/O error.
  */
  const getSyncHandle = async (fh,opName)=>{
    if(!fh.syncHandle){
      const t = performance.now();
      log("Acquiring sync handle for",fh.filenameAbs);
      const maxTries = 6,
            msBase = state.asyncIdleWaitTime * 2;
      let i = 1, ms = msBase;
      for(; true; ms = msBase * ++i){
        try {
          //if(i<3) toss("Just testing getSyncHandle() wait-and-retry.");
          //TODO? A config option which tells it to throw here
          //randomly every now and then, for testing purposes.
          fh.syncHandle = await fh.fileHandle.createSyncAccessHandle();
          break;
        }catch(e){
          if(i === maxTries){
            throw new GetSyncHandleError(
              e, "Error getting sync handle for",opName+"().",maxTries,
              "attempts failed.",fh.filenameAbs
            );
          }
          warn("Error getting sync handle for",opName+"(). Waiting",ms,
               "ms and trying again.",fh.filenameAbs,e);
          Atomics.wait(state.sabOPView, state.opIds.retry, 0, ms);
        }
      }
      log("Got",opName+"() sync handle for",fh.filenameAbs,
          'in',performance.now() - t,'ms');
      if(!fh.xLock){
        __implicitLocks.add(fh.fid);
        log("Acquired implicit lock for",opName+"()",fh.fid,fh.filenameAbs);
      }
    }
    return fh.syncHandle;
  };
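  /**
     For illustration, the retry schedule used by getSyncHandle(): with
     maxTries=6 and msBase = state.asyncIdleWaitTime * 2, the wait
     before retry i+1 is msBase*i ms. If asyncIdleWaitTime were, say,
     150ms (a hypothetical value), the waits would be roughly 300, 600,
     900, 1200, and 1500 ms before giving up after the 6th failed
     attempt.
  */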
  /**
     Stores the given value at state.sabOPView[state.opIds.rc] and then
     Atomics.notify()'s it.
  */
  const storeAndNotify = (opName, value)=>{
    log(opName+"() => notify(",value,")");
    Atomics.store(state.sabOPView, state.opIds.rc, value);
    Atomics.notify(state.sabOPView, state.opIds.rc);
  };
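  /**
     A sketch of the result handshake, as implied by the Atomics usage
     here: the synchronous half is assumed to block on the opIds.rc
     slot (e.g. via Atomics.wait(sabOPView, opIds.rc, ...)) after
     dispatching an operation, and storeAndNotify() is what wakes it
     with that operation's result code.
  */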
  /**
     Throws if fh is a file-holding object which is flagged as read-only.
  */
  const affirmNotRO = function(opName,fh){
    if(fh.readOnly) toss(opName+"(): File is read-only: "+fh.filenameAbs);
  };
  /**
     We track 2 different timers: the "metrics" timer records how much
     time we spend performing work. The "wait" timer records how much
     time we spend waiting on the underlying OPFS APIs. See the calls
     to mTimeStart(), mTimeEnd(), wTimeStart(), and wTimeEnd()
     throughout this file to see how they're used.
  */
  const __mTimer = Object.create(null);
  __mTimer.op = undefined;
  __mTimer.start = undefined;
  const mTimeStart = (op)=>{
    __mTimer.start = performance.now();
    __mTimer.op = op;
    //metrics[op] || toss("Maintenance required: missing metrics for",op);
    ++metrics[op].count;
  };
  const mTimeEnd = ()=>(
    metrics[__mTimer.op].time += performance.now() - __mTimer.start
  );
  const __wTimer = Object.create(null);
  __wTimer.op = undefined;
  __wTimer.start = undefined;
  const wTimeStart = (op)=>{
    __wTimer.start = performance.now();
    __wTimer.op = op;
    //metrics[op] || toss("Maintenance required: missing metrics for",op);
  };
  const wTimeEnd = ()=>(
    metrics[__wTimer.op].wait += performance.now() - __wTimer.start
  );

  /**
     Gets set to true by the 'opfs-async-shutdown' command to quit the
     wait loop. This is only intended for debugging purposes: we cannot
     inspect this file's state while the tight waitLoop() is running and
     need a way to stop that loop for introspection purposes.
  */
  let flagAsyncShutdown = false;

  /**
     Asynchronous wrappers for sqlite3_vfs and sqlite3_io_methods
     methods, as well as helpers like mkdir(). Maintenance reminder:
     members are in alphabetical order to simplify finding them.
  */
  const vfsAsyncImpls = {
    'opfs-async-metrics': async ()=>{
      mTimeStart('opfs-async-metrics');
      metrics.dump();
      storeAndNotify('opfs-async-metrics', 0);
      mTimeEnd();
    },
    'opfs-async-shutdown': async ()=>{
      flagAsyncShutdown = true;
      storeAndNotify('opfs-async-shutdown', 0);
    },
    mkdir: async (dirname)=>{
      mTimeStart('mkdir');
      let rc = 0;
      wTimeStart('mkdir');
      try {
        await getDirForFilename(dirname+"/filepart", true);
      }catch(e){
        state.s11n.storeException(2,e);
        rc = state.sq3Codes.SQLITE_IOERR;
      }finally{
        wTimeEnd();
      }
      storeAndNotify('mkdir', rc);
      mTimeEnd();
    },
    xAccess: async (filename)=>{
      mTimeStart('xAccess');
      /* OPFS cannot support the full range of xAccess() queries
         sqlite3 calls for. We can essentially just tell if the file
         is accessible, but if it is then it's automatically writable
         (unless it's locked, which we cannot(?) know without trying
         to open it). OPFS does not have the notion of read-only.

         The return semantics of this function differ from sqlite3's
         xAccess semantics because we are limited in what we can
         communicate back to our synchronous communication partner: 0 =
         accessible, non-0 means not accessible.
      */
      let rc = 0;
      wTimeStart('xAccess');
      try{
        const [dh, fn] = await getDirForFilename(filename);
        await dh.getFileHandle(fn);
      }catch(e){
        state.s11n.storeException(2,e);
        rc = state.sq3Codes.SQLITE_IOERR;
      }finally{
        wTimeEnd();
      }
      storeAndNotify('xAccess', rc);
      mTimeEnd();
    },
    xClose: async function(fid/*sqlite3_file pointer*/){
      const opName = 'xClose';
      mTimeStart(opName);
      __implicitLocks.delete(fid);
      const fh = __openFiles[fid];
      let rc = 0;
      wTimeStart(opName);
      if(fh){
        delete __openFiles[fid];
        await closeSyncHandle(fh);
        if(fh.deleteOnClose){
          try{ await fh.dirHandle.removeEntry(fh.filenamePart) }
          catch(e){ warn("Ignoring dirHandle.removeEntry() failure of",fh,e) }
        }
      }else{
        state.s11n.serialize();
        rc = state.sq3Codes.SQLITE_NOTFOUND;
      }
      wTimeEnd();
      storeAndNotify(opName, rc);
      mTimeEnd();
    },
    xDelete: async function(...args){
      mTimeStart('xDelete');
      const rc = await vfsAsyncImpls.xDeleteNoWait(...args);
      storeAndNotify('xDelete', rc);
      mTimeEnd();
    },
    xDeleteNoWait: async function(filename, syncDir = 0, recursive = false){
      /* The syncDir flag is, for purposes of the VFS API's semantics,
         ignored here. However, if it has the value 0x1234 then: after
         deleting the given file, recursively try to delete any empty
         directories left behind in its wake (ignoring any errors and
         stopping at the first failure).

         That said: we don't know for sure that removeEntry() fails if
         the dir is not empty because the API is not documented. It has,
         however, a "recursive" flag which defaults to false, so
         presumably it will fail if the dir is not empty and that flag
         is false.
      */
      let rc = 0;
      wTimeStart('xDelete');
      try {
        while(filename){
          const [hDir, filenamePart] = await getDirForFilename(filename, false);
          if(!filenamePart) break;
          await hDir.removeEntry(filenamePart, {recursive});
          if(0x1234 !== syncDir) break;
          recursive = false;
          filename = getResolvedPath(filename, true);
          filename.pop();
          filename = filename.join('/');
        }
      }catch(e){
        state.s11n.storeException(2,e);
        rc = state.sq3Codes.SQLITE_IOERR_DELETE;
      }
      wTimeEnd();
      return rc;
    },
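    /*
      For example (illustrative only): xDeleteNoWait('/foo/bar/my.db',
      0x1234) removes my.db, then attempts to remove /foo/bar and then
      /foo, stopping at the first removeEntry() failure (e.g. a
      non-empty directory).
    */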
    xFileSize: async function(fid/*sqlite3_file pointer*/){
      mTimeStart('xFileSize');
      const fh = __openFiles[fid];
      let rc = 0;
      wTimeStart('xFileSize');
      try{
        const sz = await (await getSyncHandle(fh,'xFileSize')).getSize();
        state.s11n.serialize(Number(sz));
      }catch(e){
        state.s11n.storeException(1,e);
        rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR);
      }
      await releaseImplicitLock(fh);
      wTimeEnd();
      storeAndNotify('xFileSize', rc);
      mTimeEnd();
    },
    xLock: async function(fid/*sqlite3_file pointer*/,
                          lockType/*SQLITE_LOCK_...*/){
      mTimeStart('xLock');
      const fh = __openFiles[fid];
      let rc = 0;
      const oldLockType = fh.xLock;
      fh.xLock = lockType;
      if(!fh.syncHandle){
        wTimeStart('xLock');
        try {
          await getSyncHandle(fh,'xLock');
          __implicitLocks.delete(fid);
        }catch(e){
          state.s11n.storeException(1,e);
          rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_LOCK);
          fh.xLock = oldLockType;
        }
        wTimeEnd();
      }
      storeAndNotify('xLock',rc);
      mTimeEnd();
    },
    xOpen: async function(fid/*sqlite3_file pointer*/, filename,
                          flags/*SQLITE_OPEN_...*/,
                          opfsFlags/*OPFS_...*/){
      const opName = 'xOpen';
      mTimeStart(opName);
      const create = (state.sq3Codes.SQLITE_OPEN_CREATE & flags);
      wTimeStart('xOpen');
      try{
        let hDir, filenamePart;
        try {
          [hDir, filenamePart] = await getDirForFilename(filename, !!create);
        }catch(e){
          state.s11n.storeException(1,e);
          storeAndNotify(opName, state.sq3Codes.SQLITE_NOTFOUND);
          mTimeEnd();
          wTimeEnd();
          return;
        }
        const hFile = await hDir.getFileHandle(filenamePart, {create});
        wTimeEnd();
        const fh = Object.assign(Object.create(null),{
          fid: fid,
          filenameAbs: filename,
          filenamePart: filenamePart,
          dirHandle: hDir,
          fileHandle: hFile,
          sabView: state.sabFileBufView,
          readOnly: create
            ? false : (state.sq3Codes.SQLITE_OPEN_READONLY & flags),
          deleteOnClose: !!(state.sq3Codes.SQLITE_OPEN_DELETEONCLOSE & flags)
        });
        fh.releaseImplicitLocks =
          (opfsFlags & state.opfsFlags.OPFS_UNLOCK_ASAP)
          || state.opfsFlags.defaultUnlockAsap;
        if(0 /* this block is modelled after something wa-sqlite
                does but it leads to immediate contention on journal files.
                Update: this approach reportedly only works for DELETE journal
                mode. */
           && (0===(flags & state.sq3Codes.SQLITE_OPEN_MAIN_DB))){
          /* sqlite does not lock these files, so go ahead and grab an OPFS
             lock. */
          fh.xLock = "xOpen"/* Truthy value to keep entry from getting
                               flagged as auto-locked. String value so
                               that we can easily distinguish it later
                               if needed. */;
          await getSyncHandle(fh,'xOpen');
        }
        __openFiles[fid] = fh;
        storeAndNotify(opName, 0);
      }catch(e){
        wTimeEnd();
        error(opName,e);
        state.s11n.storeException(1,e);
        storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR);
      }
      mTimeEnd();
    },
    xRead: async function(fid/*sqlite3_file pointer*/, n, offset64){
      mTimeStart('xRead');
      let rc = 0, nRead;
      const fh = __openFiles[fid];
      try{
        wTimeStart('xRead');
        nRead = (await getSyncHandle(fh,'xRead')).read(
          fh.sabView.subarray(0, n),
          {at: Number(offset64)}
        );
        wTimeEnd();
        if(nRead < n){/* Zero-fill remaining bytes */
          fh.sabView.fill(0, nRead, n);
          rc = state.sq3Codes.SQLITE_IOERR_SHORT_READ;
        }
      }catch(e){
        if(undefined===nRead) wTimeEnd();
        error("xRead() failed",e,fh);
        state.s11n.storeException(1,e);
        rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_READ);
      }
      await releaseImplicitLock(fh);
      storeAndNotify('xRead',rc);
      mTimeEnd();
    },
    xSync: async function(fid/*sqlite3_file pointer*/, flags/*ignored*/){
      mTimeStart('xSync');
      const fh = __openFiles[fid];
      let rc = 0;
      if(!fh.readOnly && fh.syncHandle){
        try {
          wTimeStart('xSync');
          await fh.syncHandle.flush();
        }catch(e){
          state.s11n.storeException(2,e);
          rc = state.sq3Codes.SQLITE_IOERR_FSYNC;
        }
        wTimeEnd();
      }
      storeAndNotify('xSync',rc);
      mTimeEnd();
    },
    xTruncate: async function(fid/*sqlite3_file pointer*/, size){
      mTimeStart('xTruncate');
      let rc = 0;
      const fh = __openFiles[fid];
      wTimeStart('xTruncate');
      try{
        affirmNotRO('xTruncate', fh);
        await (await getSyncHandle(fh,'xTruncate')).truncate(size);
      }catch(e){
        error("xTruncate():",e,fh);
        state.s11n.storeException(2,e);
        rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_TRUNCATE);
      }
      await releaseImplicitLock(fh);
      wTimeEnd();
      storeAndNotify('xTruncate',rc);
      mTimeEnd();
    },
    xUnlock: async function(fid/*sqlite3_file pointer*/,
                            lockType/*SQLITE_LOCK_...*/){
      mTimeStart('xUnlock');
      let rc = 0;
      const fh = __openFiles[fid];
      if(state.sq3Codes.SQLITE_LOCK_NONE===lockType
         && fh.syncHandle){
        wTimeStart('xUnlock');
        try { await closeSyncHandle(fh) }
        catch(e){
          state.s11n.storeException(1,e);
          rc = state.sq3Codes.SQLITE_IOERR_UNLOCK;
        }
        wTimeEnd();
      }
      storeAndNotify('xUnlock',rc);
      mTimeEnd();
    },
    xWrite: async function(fid/*sqlite3_file pointer*/, n, offset64){
      mTimeStart('xWrite');
      let rc;
      const fh = __openFiles[fid];
      wTimeStart('xWrite');
      try{
        affirmNotRO('xWrite', fh);
        rc = (
          n === (await getSyncHandle(fh,'xWrite'))
            .write(fh.sabView.subarray(0, n),
                   {at: Number(offset64)})
        ) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE;
      }catch(e){
        error("xWrite():",e,fh);
        state.s11n.storeException(1,e);
        rc = GetSyncHandleError.convertRc(e,state.sq3Codes.SQLITE_IOERR_WRITE);
      }
      await releaseImplicitLock(fh);
      wTimeEnd();
      storeAndNotify('xWrite',rc);
      mTimeEnd();
    }
  }/*vfsAsyncImpls*/;

  const initS11n = ()=>{
    /**
       ACHTUNG: this code is 100% duplicated in the other half of this
       proxy! The documentation is maintained in the "synchronous half".
    */
    if(state.s11n) return state.s11n;
    const textDecoder = new TextDecoder(),
          textEncoder = new TextEncoder('utf-8'),
          viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
          viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
    state.s11n = Object.create(null);
    const TypeIds = Object.create(null);
    TypeIds.number  = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' };
    TypeIds.bigint  = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' };
    TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' };
    TypeIds.string =  { id: 4 };
    const getTypeId = (v)=>(
      TypeIds[typeof v]
        || toss("Maintenance required: this value type cannot be serialized.",v)
    );
    const getTypeIdById = (tid)=>{
      switch(tid){
        case TypeIds.number.id: return TypeIds.number;
        case TypeIds.bigint.id: return TypeIds.bigint;
        case TypeIds.boolean.id: return TypeIds.boolean;
        case TypeIds.string.id: return TypeIds.string;
        default: toss("Invalid type ID:",tid);
      }
    };
    state.s11n.deserialize = function(clear=false){
      ++metrics.s11n.deserialize.count;
      const t = performance.now();
      const argc = viewU8[0];
      const rc = argc ? [] : null;
      if(argc){
        const typeIds = [];
        let offset = 1, i, n, v;
        for(i = 0; i < argc; ++i, ++offset){
          typeIds.push(getTypeIdById(viewU8[offset]));
        }
        for(i = 0; i < argc; ++i){
          const t = typeIds[i];
          if(t.getter){
            v = viewDV[t.getter](offset, state.littleEndian);
            offset += t.size;
          }else{/*String*/
            n = viewDV.getInt32(offset, state.littleEndian);
            offset += 4;
            v = textDecoder.decode(viewU8.slice(offset, offset+n));
            offset += n;
          }
          rc.push(v);
        }
      }
      if(clear) viewU8[0] = 0;
      //log("deserialize:",argc, rc);
      metrics.s11n.deserialize.time += performance.now() - t;
      return rc;
    };
    state.s11n.serialize = function(...args){
      const t = performance.now();
      ++metrics.s11n.serialize.count;
      if(args.length){
        //log("serialize():",args);
        const typeIds = [];
        let i = 0, offset = 1;
        viewU8[0] = args.length & 0xff /* header = # of args */;
        for(; i < args.length; ++i, ++offset){
          /* Write the TypeIds.id value into the next args.length
             bytes. */
          typeIds.push(getTypeId(args[i]));
          viewU8[offset] = typeIds[i].id;
        }
        for(i = 0; i < args.length; ++i){
          /* Serialize the following bytes based on their
             corresponding TypeIds.id from the header. */
          const t = typeIds[i];
          if(t.setter){
            viewDV[t.setter](offset, args[i], state.littleEndian);
            offset += t.size;
          }else{/*String*/
            const s = textEncoder.encode(args[i]);
            viewDV.setInt32(offset, s.byteLength, state.littleEndian);
            offset += 4;
            viewU8.set(s, offset);
            offset += s.byteLength;
          }
        }
        //log("serialize() result:",viewU8.slice(0,offset));
      }else{
        viewU8[0] = 0;
      }
      metrics.s11n.serialize.time += performance.now() - t;
    };
    state.s11n.storeException = state.asyncS11nExceptions
      ? ((priority,e)=>{
        if(priority<=state.asyncS11nExceptions){
          state.s11n.serialize([e.name,': ',e.message].join(""));
        }
      })
      : ()=>{};
    return state.s11n;
  }/*initS11n()*/;
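
  /**
     For illustration, the serialization format which the above code
     writes into the s11n region of sabIO (multi-byte values use
     state.littleEndian):

       byte 0          : argc (number of serialized values; 0 = none)
       bytes 1..argc   : one TypeIds.id byte per value
       remaining bytes : the values themselves, in order:
                         number  -> 8-byte float64
                         bigint  -> 8-byte int64
                         boolean -> 4-byte int32
                         string  -> 4-byte byte length, then UTF-8 bytes

     e.g. state.s11n.serialize(3, "hi") is expected to produce:

       [2, 1, 4, <8-byte float64 3.0>, <int32 2>, 'h', 'i']
  */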
  const waitLoop = async function f(){
    const opHandlers = Object.create(null);
    for(let k of Object.keys(state.opIds)){
      const vi = vfsAsyncImpls[k];
      if(!vi) continue;
      const o = Object.create(null);
      opHandlers[state.opIds[k]] = o;
      o.key = k;
      o.f = vi;
    }
    while(!flagAsyncShutdown){
      try {
        if('not-equal'!==Atomics.wait(
          state.sabOPView, state.opIds.whichOp, 0, state.asyncIdleWaitTime
        )){
          /* Maintenance note: we compare against 'not-equal' because

             https://github.com/tomayac/sqlite-wasm/issues/12

             is reporting that this occasionally, under high loads,
             returns 'ok', which leads to the whichOp being 0 (which
             isn't a valid operation ID and leads to an exception,
             along with a corresponding ugly console log
             message). Unfortunately, the conditions for that cannot
             be reliably reproduced. The only place in our code which
             writes a 0 to the state.opIds.whichOp SharedArrayBuffer
             index is a few lines down from here, and that instance
             is required in order for clear communication between
             the sync half of this proxy and this half.
          */
          await releaseImplicitLocks();
          continue;
        }
        const opId = Atomics.load(state.sabOPView, state.opIds.whichOp);
        Atomics.store(state.sabOPView, state.opIds.whichOp, 0);
        const hnd = opHandlers[opId] ?? toss("No waitLoop handler for whichOp #",opId);
        const args = state.s11n.deserialize(
          true /* clear s11n to keep the caller from confusing this with
                  an exception string written by the upcoming
                  operation */
        ) || [];
        //warn("waitLoop() whichOp =",opId, hnd, args);
        if(hnd.f) await hnd.f(...args);
        else error("Missing callback for opId",opId);
      }catch(e){
        error('in waitLoop():',e);
      }
    }
  };
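
  /**
     A sketch of the operation protocol driven by waitLoop(), as implied
     by the Atomics usage above and the behavior expected of the
     synchronous half:

     1) The sync side serializes the operation's arguments via s11n,
        stores the operation's ID in sabOPView[opIds.whichOp], and
        Atomics.notify()'s that slot.
     2) waitLoop() wakes, reads and zeroes whichOp, deserializes the
        arguments, and invokes the matching vfsAsyncImpls member.
     3) That member does its OPFS work and calls storeAndNotify() to
        publish its result code in sabOPView[opIds.rc], on which the
        sync side is presumed to be waiting.
     4) If Atomics.wait() instead times out (no pending operation within
        asyncIdleWaitTime ms), implicit locks are released and the loop
        continues.
  */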

  navigator.storage.getDirectory().then(function(d){
    state.rootDir = d;
    globalThis.onmessage = function({data}){
      switch(data.type){
        case 'opfs-async-init':{
          /* Receive shared state from synchronous partner */
          const opt = data.args;
          for(const k in opt) state[k] = opt[k];
          state.verbose = opt.verbose ?? 1;
          state.sabOPView = new Int32Array(state.sabOP);
          state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize);
          state.sabS11nView = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
          Object.keys(vfsAsyncImpls).forEach((k)=>{
            if(!Number.isFinite(state.opIds[k])){
              toss("Maintenance required: missing state.opIds[",k,"]");
            }
          });
          initS11n();
          metrics.reset();
          log("init state",state);
          wPost('opfs-async-inited');
          waitLoop();
          break;
        }
        case 'opfs-async-restart':
          if(flagAsyncShutdown){
            warn("Restarting after opfs-async-shutdown. Might or might not work.");
            flagAsyncShutdown = false;
            waitLoop();
          }
          break;
        case 'opfs-async-metrics':
          metrics.dump();
          break;
      }
    };
    wPost('opfs-async-loaded');
  }).catch((e)=>error("error initializing OPFS asyncer:",e));
}/*installAsyncProxy()*/;

if(!globalThis.SharedArrayBuffer){
  wPost('opfs-unavailable', "Missing SharedArrayBuffer API.",
        "The server must emit the COOP/COEP response headers to enable that.");
}else if(!globalThis.Atomics){
  wPost('opfs-unavailable', "Missing Atomics API.",
        "The server must emit the COOP/COEP response headers to enable that.");
}else if(!globalThis.FileSystemHandle ||
         !globalThis.FileSystemDirectoryHandle ||
         !globalThis.FileSystemFileHandle ||
         !globalThis.FileSystemFileHandle.prototype.createSyncAccessHandle ||
         !navigator?.storage?.getDirectory){
  wPost('opfs-unavailable', "Missing required OPFS APIs.");
}else{
  installAsyncProxy(self);
}