Initial commit

This commit is contained in:
bilulib
2025-04-13 00:18:57 +02:00
parent cff009bb7c
commit d894249e61
18301 changed files with 2905442 additions and 3845 deletions

View File

@@ -0,0 +1,13 @@
// TypeScript trick to simulate opaque (branded) types, like in Flow: the
// phantom `__brand` property prevents plain values of the underlying type
// from being assigned where the opaque type is expected.
type Opaque<K, T> = T & {
    __brand: K;
};
// Only createCacheKey (below) is meant to produce values of these branded
// types, so all cache keys flow through a single normalization point.
export type NormalizedHref = Opaque<'NormalizedHref', string>;
export type NormalizedSearch = Opaque<'NormalizedSearch', string>;
export type NormalizedNextUrl = Opaque<'NormalizedNextUrl', string>;
export type RouteCacheKey = Opaque<'RouteCacheKey', {
    href: NormalizedHref;
    search: NormalizedSearch;
    nextUrl: NormalizedNextUrl | null;
}>;
export declare function createCacheKey(originalHref: string, nextUrl: string | null): RouteCacheKey;
export {};

View File

@@ -0,0 +1,28 @@
// TypeScript trick to simulate opaque types, like in Flow.
"use strict";
// Generated CommonJS interop (compiler output): mark the module as an ES
// module and expose `createCacheKey` through a getter so the export binding
// stays live.
Object.defineProperty(exports, "__esModule", {
    value: true
});
Object.defineProperty(exports, "createCacheKey", {
    enumerable: true,
    get: function() {
        return createCacheKey;
    }
});
// Builds the canonical cache key for a route: the full href, its search
// string (extracted via the URL parser), and the interception-route header.
function createCacheKey(originalHref, nextUrl) {
    // Parse only to pull out the search portion; the href itself is stored
    // verbatim. Note this requires `originalHref` to be an absolute URL.
    const { search } = new URL(originalHref);
    return {
        href: originalHref,
        search,
        nextUrl
    };
}
// Generated interop footer: if the module ended up with a callable/object
// default export lacking its own __esModule flag, merge the named exports
// onto it and make it the module object, so both require() styles work.
if ((typeof exports.default === 'function' || (typeof exports.default === 'object' && exports.default !== null)) && typeof exports.default.__esModule === 'undefined') {
    Object.defineProperty(exports.default, '__esModule', { value: true });
    Object.assign(exports.default, exports);
    module.exports = exports.default;
}
//# sourceMappingURL=cache-key.js.map

View File

@@ -0,0 +1 @@
{"version":3,"sources":["../../../../src/client/components/segment-cache-impl/cache-key.ts"],"sourcesContent":["// TypeScript trick to simulate opaque types, like in Flow.\ntype Opaque<K, T> = T & { __brand: K }\n\n// Only functions in this module should be allowed to create CacheKeys.\nexport type NormalizedHref = Opaque<'NormalizedHref', string>\nexport type NormalizedSearch = Opaque<'NormalizedSearch', string>\nexport type NormalizedNextUrl = Opaque<'NormalizedNextUrl', string>\n\nexport type RouteCacheKey = Opaque<\n 'RouteCacheKey',\n {\n href: NormalizedHref\n search: NormalizedSearch\n nextUrl: NormalizedNextUrl | null\n\n // TODO: Eventually the dynamic params will be added here, too.\n }\n>\n\nexport function createCacheKey(\n originalHref: string,\n nextUrl: string | null\n): RouteCacheKey {\n const originalUrl = new URL(originalHref)\n const cacheKey = {\n href: originalHref as NormalizedHref,\n search: originalUrl.search as NormalizedSearch,\n nextUrl: nextUrl as NormalizedNextUrl | null,\n } as RouteCacheKey\n return cacheKey\n}\n"],"names":["createCacheKey","originalHref","nextUrl","originalUrl","URL","cacheKey","href","search"],"mappings":"AAAA,2DAA2D;;;;;+BAmB3CA;;;eAAAA;;;AAAT,SAASA,eACdC,YAAoB,EACpBC,OAAsB;IAEtB,MAAMC,cAAc,IAAIC,IAAIH;IAC5B,MAAMI,WAAW;QACfC,MAAML;QACNM,QAAQJ,YAAYI,MAAM;QAC1BL,SAASA;IACX;IACA,OAAOG;AACT"}

View File

@@ -0,0 +1,140 @@
import type { HeadData, LoadingModuleData } from '../../../shared/lib/app-router-context.shared-runtime';
import type { Segment as FlightRouterStateSegment } from '../../../server/app-render/types';
import { type PrefetchTask, type PrefetchSubtaskResult } from './scheduler';
import type { NormalizedHref, NormalizedNextUrl, NormalizedSearch, RouteCacheKey } from './cache-key';
import { type Prefix } from './tuple-map';
import type { FlightRouterState } from '../../../server/app-render/types';
// Compact route tree stored with each fulfilled route cache entry; mirrors
// the shape of a FlightRouterState, with child trees keyed by parallel route.
export type RouteTree = {
    key: string;
    segment: FlightRouterStateSegment;
    // Child trees per parallel route; null when this node has no children.
    slots: null | {
        [parallelRouteKey: string]: RouteTree;
    };
    isRootLayout: boolean;
};
// Fields common to every route cache entry: a staleness timestamp (compared
// against the `now` argument of the read functions below), the keypath that
// locates the entry in the cache map, and intrusive LRU links (prev/next/size
// — see lru.d.ts LRUNode).
type RouteCacheEntryShared = {
    staleAt: number;
    couldBeIntercepted: boolean;
    keypath: null | Prefix<RouteCacheKeypath>;
    next: null | RouteCacheEntry;
    prev: null | RouteCacheEntry;
    size: number;
};
/**
 * Tracks the status of a cache entry as it progresses from no data (Empty),
 * waiting for server data (Pending), and finished (either Fulfilled or
 * Rejected, depending on the response from the server).
 */
export declare const enum EntryStatus {
    Empty = 0,
    Pending = 1,
    Fulfilled = 2,
    Rejected = 3
}
// Entry created but not yet populated with server data.
type PendingRouteCacheEntry = RouteCacheEntryShared & {
    status: EntryStatus.Empty | EntryStatus.Pending;
    blockedTasks: Set<PrefetchTask> | null;
    canonicalUrl: null;
    tree: null;
    head: HeadData | null;
    isHeadPartial: true;
    isPPREnabled: false;
};
// Entry whose request failed; retains no data.
type RejectedRouteCacheEntry = RouteCacheEntryShared & {
    status: EntryStatus.Rejected;
    blockedTasks: Set<PrefetchTask> | null;
    canonicalUrl: null;
    tree: null;
    head: null;
    isHeadPartial: true;
    isPPREnabled: boolean;
};
// Entry with a complete route response: canonical URL, route tree, and head.
export type FulfilledRouteCacheEntry = RouteCacheEntryShared & {
    status: EntryStatus.Fulfilled;
    blockedTasks: null;
    canonicalUrl: string;
    tree: RouteTree;
    head: HeadData;
    isHeadPartial: boolean;
    isPPREnabled: boolean;
};
// Discriminated union over `status`.
export type RouteCacheEntry = PendingRouteCacheEntry | FulfilledRouteCacheEntry | RejectedRouteCacheEntry;
// How much data a prefetch request asks the server for.
export declare const enum FetchStrategy {
    PPR = 0,
    Full = 1,
    LoadingBoundary = 2
}
type SegmentCacheEntryShared = {
    staleAt: number;
    fetchStrategy: FetchStrategy;
    // In-progress revalidation entry that may replace this one, if any.
    revalidating: SegmentCacheEntry | null;
    keypath: null | Prefix<SegmentCacheKeypath>;
    next: null | SegmentCacheEntry;
    prev: null | SegmentCacheEntry;
    size: number;
};
export type EmptySegmentCacheEntry = SegmentCacheEntryShared & {
    status: EntryStatus.Empty;
    rsc: null;
    loading: null;
    isPartial: true;
    promise: null;
};
export type PendingSegmentCacheEntry = SegmentCacheEntryShared & {
    status: EntryStatus.Pending;
    rsc: null;
    loading: null;
    isPartial: true;
    // Resolver used by waitForSegmentCacheEntry: settles with the fulfilled
    // entry, or null if the request does not succeed.
    promise: null | PromiseWithResolvers<FulfilledSegmentCacheEntry | null>;
};
type RejectedSegmentCacheEntry = SegmentCacheEntryShared & {
    status: EntryStatus.Rejected;
    rsc: null;
    loading: null;
    isPartial: true;
    promise: null;
};
export type FulfilledSegmentCacheEntry = SegmentCacheEntryShared & {
    status: EntryStatus.Fulfilled;
    rsc: React.ReactNode | null;
    loading: LoadingModuleData | Promise<LoadingModuleData>;
    isPartial: boolean;
    promise: null;
};
// Discriminated union over `status`.
export type SegmentCacheEntry = EmptySegmentCacheEntry | PendingSegmentCacheEntry | RejectedSegmentCacheEntry | FulfilledSegmentCacheEntry;
export type NonEmptySegmentCacheEntry = Exclude<SegmentCacheEntry, EmptySegmentCacheEntry>;
// Keypaths used to index the route and segment cache maps (see tuple-map).
type RouteCacheKeypath = [NormalizedHref, NormalizedNextUrl];
type SegmentCacheKeypath = [string, NormalizedSearch];
export declare function getCurrentCacheVersion(): number;
/**
 * Used to clear the client prefetch cache when a server action calls
 * revalidatePath or revalidateTag. Eventually we will support only clearing the
 * segments that were actually affected, but there's more work to be done on the
 * server before the client is able to do this correctly.
 */
export declare function revalidateEntireCache(nextUrl: string | null, tree: FlightRouterState): void;
export declare function readExactRouteCacheEntry(now: number, href: NormalizedHref, nextUrl: NormalizedNextUrl | null): RouteCacheEntry | null;
export declare function readRouteCacheEntry(now: number, key: RouteCacheKey): RouteCacheEntry | null;
export declare function getSegmentKeypathForTask(task: PrefetchTask, route: FulfilledRouteCacheEntry, path: string): Prefix<SegmentCacheKeypath>;
export declare function readSegmentCacheEntry(now: number, routeCacheKey: RouteCacheKey, path: string): SegmentCacheEntry | null;
// Returns a promise that settles once the given pending entry is resolved;
// null signals the entry did not end up fulfilled.
export declare function waitForSegmentCacheEntry(pendingEntry: PendingSegmentCacheEntry): Promise<FulfilledSegmentCacheEntry | null>;
/**
 * Checks if an entry for a route exists in the cache. If so, it returns the
 * entry. If not, it adds an empty entry to the cache and returns it.
 */
export declare function readOrCreateRouteCacheEntry(now: number, task: PrefetchTask): RouteCacheEntry;
/**
 * Checks if an entry for a segment exists in the cache. If so, it returns the
 * entry. If not, it adds an empty entry to the cache and returns it.
 */
export declare function readOrCreateSegmentCacheEntry(now: number, task: PrefetchTask, route: FulfilledRouteCacheEntry, path: string): SegmentCacheEntry;
export declare function readOrCreateRevalidatingSegmentEntry(now: number, prevEntry: SegmentCacheEntry): SegmentCacheEntry;
export declare function upsertSegmentEntry(now: number, keypath: Prefix<SegmentCacheKeypath>, candidateEntry: SegmentCacheEntry): SegmentCacheEntry | null;
export declare function createDetachedSegmentCacheEntry(staleAt: number): EmptySegmentCacheEntry;
export declare function upgradeToPendingSegment(emptyEntry: EmptySegmentCacheEntry, fetchStrategy: FetchStrategy): PendingSegmentCacheEntry;
export declare function resetRevalidatingSegmentEntry(owner: SegmentCacheEntry): EmptySegmentCacheEntry;
export declare function convertRouteTreeToFlightRouterState(routeTree: RouteTree): FlightRouterState;
export declare function fetchRouteOnCacheMiss(entry: PendingRouteCacheEntry, task: PrefetchTask): Promise<PrefetchSubtaskResult<null> | null>;
export declare function fetchSegmentOnCacheMiss(route: FulfilledRouteCacheEntry, segmentCacheEntry: PendingSegmentCacheEntry, routeKey: RouteCacheKey, segmentPath: string): Promise<PrefetchSubtaskResult<FulfilledSegmentCacheEntry> | null>;
export declare function fetchSegmentPrefetchesUsingDynamicRequest(task: PrefetchTask, route: FulfilledRouteCacheEntry, fetchStrategy: FetchStrategy, dynamicRequestTree: FlightRouterState, spawnedEntries: Map<string, PendingSegmentCacheEntry>): Promise<PrefetchSubtaskResult<null> | null>;
export {};

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,12 @@
// Public interface of the least-recently-used tracker built by createLRU.
export type LRU<T extends LRUNode> = {
    // Insert a node, or move an already-tracked node to the front.
    put(node: T): void;
    // Stop tracking a node.
    delete(node: T): void;
    // Change a node's size and adjust the tracked total accordingly.
    updateSize(node: T, size: number): void;
};
// Minimal shape a cache entry must have to be tracked: intrusive prev/next
// links plus a size. The size unit is arbitrary to the LRU; callers here use
// byte size (see lru.js).
type LRUNode<T = any> = {
    prev: T | null;
    next: T | null;
    size: number;
};
export declare function createLRU<T extends LRUNode>(maxLruSize: number, onEviction: (node: T) => void): LRU<T>;
export {};

View File

@@ -0,0 +1,121 @@
"use strict";
// Generated CommonJS interop (compiler output): mark the module as an ES
// module and expose `createLRU` through a live getter.
Object.defineProperty(exports, "__esModule", {
    value: true
});
Object.defineProperty(exports, "createLRU", {
    enumerable: true,
    get: function() {
        return createLRU;
    }
});
function createLRU(// From the LRU's perspective, the size unit is arbitrary, but for our
// purposes this is the byte size.
maxLruSize, onEviction) {
    // Entries form a circular doubly-linked list: `head` is the most recently
    // used node and `head.prev` is the tail (least recently used). A node with
    // null prev/next links is not currently tracked by the LRU.
    let head = null;
    let didScheduleCleanup = false;
    // Combined size of all tracked nodes.
    let lruSize = 0;
    // Insert a node at the front, or move an already-tracked node there.
    function put(node) {
        if (head === node) {
            // Already at the head
            return;
        }
        const prev = node.prev;
        const next = node.next;
        if (next === null || prev === null) {
            // This is an insertion
            lruSize += node.size;
            // Whenever we add an entry, we need to check if we've exceeded the
            // max size. We don't evict entries immediately; they're evicted later in
            // an asynchronous task.
            ensureCleanupIsScheduled();
        } else {
            // This is a move. Remove from its current position.
            prev.next = next;
            next.prev = prev;
        }
        // Move to the front of the list
        if (head === null) {
            // This is the first entry; it links to itself in both directions.
            node.prev = node;
            node.next = node;
        } else {
            // Add to the front of the list
            const tail = head.prev;
            node.prev = tail;
            tail.next = node;
            node.next = head;
            head.prev = node;
        }
        head = node;
    }
    // Resize a node and keep the tracked total consistent. Safe to call
    // whether or not the node is currently in the list.
    function updateSize(node, newNodeSize) {
        // This is a separate function from `put` so that we can resize the entry
        // regardless of whether it's currently being tracked by the LRU.
        const prevNodeSize = node.size;
        node.size = newNodeSize;
        if (node.next === null) {
            // This entry is not currently being tracked by the LRU.
            return;
        }
        // Update the total LRU size
        lruSize = lruSize - prevNodeSize + newNodeSize;
        ensureCleanupIsScheduled();
    }
    // Unlink a node from the list (if present) and null out its links.
    function deleteNode(deleted) {
        const next = deleted.next;
        const prev = deleted.prev;
        if (next !== null && prev !== null) {
            lruSize -= deleted.size;
            deleted.next = null;
            deleted.prev = null;
            // Remove from the list
            if (head === deleted) {
                // Update the head
                if (next === head) {
                    // This was the last entry
                    head = null;
                } else {
                    head = next;
                }
            } else {
                prev.next = next;
                next.prev = prev;
            }
        } else {
        // Already deleted
        }
    }
    // Schedule an asynchronous eviction pass once the total exceeds the max.
    function ensureCleanupIsScheduled() {
        if (didScheduleCleanup || lruSize <= maxLruSize) {
            return;
        }
        didScheduleCleanup = true;
        requestCleanupCallback(cleanup);
    }
    function cleanup() {
        didScheduleCleanup = false;
        // Evict entries until we're at 90% capacity. We can assume this won't
        // infinite loop because even if `maxLruSize` were 0, eventually
        // `deleteNode` sets `head` to `null` when we run out entries.
        const ninetyPercentMax = maxLruSize * 0.9;
        while(lruSize > ninetyPercentMax && head !== null){
            const tail = head.prev;
            deleteNode(tail);
            onEviction(tail);
        }
    }
    return {
        put,
        delete: deleteNode,
        updateSize
    };
}
// Defer eviction to browser idle time when supported; otherwise fall back to
// a macrotask.
const requestCleanupCallback = typeof requestIdleCallback === 'function' ? requestIdleCallback : (cb)=>setTimeout(cb, 0);
// Generated interop footer: if the module ended up with a callable/object
// default export lacking its own __esModule flag, merge the named exports
// onto it and make it the module object, so both require() styles work.
if ((typeof exports.default === 'function' || (typeof exports.default === 'object' && exports.default !== null)) && typeof exports.default.__esModule === 'undefined') {
    Object.defineProperty(exports.default, '__esModule', { value: true });
    Object.assign(exports.default, exports);
    module.exports = exports.default;
}
//# sourceMappingURL=lru.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,40 @@
import type { FlightRouterState, FlightSegmentPath } from '../../../server/app-render/types';
import type { CacheNode } from '../../../shared/lib/app-router-context.shared-runtime';
import { NavigationResultTag } from '../segment-cache';
// The navigation must be handled as a full-page (MPA) navigation to the URL
// carried in `data`.
type MPANavigationResult = {
    tag: NavigationResultTag.MPA;
    data: string;
};
// Nothing to render; only the canonical URL (and possibly scroll) changes.
type NoOpNavigationResult = {
    tag: NavigationResultTag.NoOp;
    data: {
        canonicalUrl: string;
        shouldScroll: boolean;
    };
};
// A client-side transition, with everything the router needs to commit it.
type SuccessfulNavigationResult = {
    tag: NavigationResultTag.Success;
    data: {
        flightRouterState: FlightRouterState;
        cacheNode: CacheNode;
        canonicalUrl: string;
        scrollableSegments: Array<FlightSegmentPath>;
        shouldScroll: boolean;
        hash: string;
    };
};
// Returned when there is no matching prefetch in the cache: the final result
// resolves asynchronously once the server responds.
type AsyncNavigationResult = {
    tag: NavigationResultTag.Async;
    data: Promise<MPANavigationResult | NoOpNavigationResult | SuccessfulNavigationResult>;
};
export type NavigationResult = MPANavigationResult | SuccessfulNavigationResult | NoOpNavigationResult | AsyncNavigationResult;
/**
 * Navigate to a new URL, using the Segment Cache to construct a response.
 *
 * To allow for synchronous navigations whenever possible, this is not an async
 * function. It returns a promise only if there's no matching prefetch in
 * the cache. Otherwise it returns an immediate result and uses Suspense/RSC to
 * stream in any missing data.
 */
export declare function navigate(url: URL, currentCacheNode: CacheNode, currentFlightRouterState: FlightRouterState, nextUrl: string | null, shouldScroll: boolean): NavigationResult;
export {};

View File

@@ -0,0 +1,341 @@
"use strict";
// Generated CommonJS interop (compiler output): mark the module as an ES
// module and expose `navigate` through a live getter.
Object.defineProperty(exports, "__esModule", {
    value: true
});
Object.defineProperty(exports, "navigate", {
    enumerable: true,
    get: function() {
        return navigate;
    }
});
const _fetchserverresponse = require("../router-reducer/fetch-server-response");
const _pprnavigations = require("../router-reducer/ppr-navigations");
const _createhreffromurl = require("../router-reducer/create-href-from-url");
const _cache = require("./cache");
const _cachekey = require("./cache-key");
const _segment = require("../../../shared/lib/segment");
const _segmentcache = require("../segment-cache");
// Navigate to a new URL, using the Segment Cache to construct the response.
// Returns a synchronous result when a fulfilled prefetch exists; otherwise
// returns an Async result wrapping the dynamic navigation.
function navigate(url, currentCacheNode, currentFlightRouterState, nextUrl, shouldScroll) {
    const now = Date.now();
    const href = url.href;
    // Navigating to the exact URL of the current location is special-cased:
    // it's a common UI pattern for apps to refresh when you click a link to
    // the current page, so in that case we refresh the dynamic data in the
    // page segments. This does not apply when any part of the hash or search
    // query has changed — which makes sense given that the way to trigger the
    // behavior is to click the same link multiple times.
    //
    // TODO: We should probably refresh the *entire* route when this case
    // occurs, not just the page segments — i.e. treat it the same as a
    // refresh() triggered by an action, the more explicit way of modeling the
    // pattern described above.
    //
    // Also note that only dynamic data is refreshed, not static/cached data.
    // If the page segment is fully static and prefetched, the request is
    // skipped. (This matches how refresh() works.)
    //
    // TODO: This is not the only place we read from the location, but we
    // should consider storing the current URL in the router state instead of
    // reading from the location object. In practice the two stay in sync, but
    // two sources of truth invite subtle bugs and race conditions.
    const isSamePageNavigation = href === window.location.href;
    const cacheKey = (0, _cachekey.createCacheKey)(href, nextUrl);
    const route = (0, _cache.readRouteCacheEntry)(now, cacheKey);
    if (route === null || route.status !== _cache.EntryStatus.Fulfilled) {
        // No matching prefetch for this route in the cache; fall back to a
        // fully dynamic navigation.
        return {
            tag: _segmentcache.NavigationResultTag.Async,
            data: navigateDynamicallyWithNoPrefetch(now, url, nextUrl, isSamePageNavigation, currentCacheNode, currentFlightRouterState, shouldScroll, url.hash)
        };
    }
    // We have a matching prefetch. Snapshot the cached render data and
    // navigate using it.
    const snapshot = readRenderSnapshotFromCache(now, cacheKey, route.tree);
    return navigateUsingPrefetchedRouteTree(now, url, nextUrl, isSamePageNavigation, currentCacheNode, currentFlightRouterState, snapshot.flightRouterState, snapshot.seedData, route.head, route.isHeadPartial, route.canonicalUrl, shouldScroll, url.hash);
}
// Performs a navigation starting from an already-prefetched route tree,
// issuing a dynamic request only for the holes the prefetch left behind.
function navigateUsingPrefetchedRouteTree(now, url, nextUrl, isSamePageNavigation, currentCacheNode, currentFlightRouterState, prefetchFlightRouterState, prefetchSeedData, prefetchHead, isPrefetchHeadPartial, canonicalUrl, shouldScroll, hash) {
    // Recursively construct a prefetch tree by reading from the Segment
    // Cache. For compatibility we emit the same data structures as the old
    // prefetching implementation: FlightRouterState and CacheNodeSeedData.
    // TODO: Eventually updateCacheNodeOnNavigation (or its equivalent) should
    // read from the Segment Cache directly; this shape exists only so we can
    // share code with the old implementation.
    const scrollableSegments = [];
    const task = (0, _pprnavigations.startPPRNavigation)(now, currentCacheNode, currentFlightRouterState, prefetchFlightRouterState, prefetchSeedData, prefetchHead, isPrefetchHeadPartial, isSamePageNavigation, scrollableSegments);
    if (task === null) {
        // The server sent back an empty tree patch. There's nothing to
        // update, except possibly the URL.
        return {
            tag: _segmentcache.NavigationResultTag.NoOp,
            data: {
                canonicalUrl,
                shouldScroll
            }
        };
    }
    const dynamicRequestTree = task.dynamicRequestTree;
    if (dynamicRequestTree !== null) {
        // The prefetched tree contains dynamic holes; request the missing
        // data and stream it in as it arrives.
        const promiseForDynamicServerResponse = (0, _fetchserverresponse.fetchServerResponse)(url, {
            flightRouterState: dynamicRequestTree,
            nextUrl
        });
        (0, _pprnavigations.listenForDynamicRequest)(task, promiseForDynamicServerResponse);
    }
    // else: the prefetched tree is fully static — no dynamic request needed.
    return navigationTaskToResult(task, currentCacheNode, canonicalUrl, scrollableSegments, shouldScroll, hash);
}
// Converts a completed PPR navigation task into a NavigationResult.
function navigationTaskToResult(task, currentCacheNode, canonicalUrl, scrollableSegments, shouldScroll, hash) {
    // A task without a router state signals that we should perform an MPA
    // navigation instead of a client-side transition.
    if (task.route === null) {
        return {
            tag: _segmentcache.NavigationResultTag.MPA,
            data: canonicalUrl
        };
    }
    const patchedCacheNode = task.node;
    return {
        tag: _segmentcache.NavigationResultTag.Success,
        data: {
            flightRouterState: task.route,
            // Fall back to the current cache node when the task produced none.
            cacheNode: patchedCacheNode !== null ? patchedCacheNode : currentCacheNode,
            canonicalUrl,
            scrollableSegments,
            shouldScroll,
            hash
        }
    };
}
// Walks a RouteTree, reading each segment's data out of the Segment Cache,
// and reconstructs the { flightRouterState, seedData } pair that the old
// prefetch implementation would have produced for this route.
function readRenderSnapshotFromCache(now, routeCacheKey, tree) {
    let childRouterStates = {};
    let childSeedDatas = {};
    const slots = tree.slots;
    if (slots !== null) {
        // Recursively snapshot every parallel route.
        for(const parallelRouteKey in slots){
            const childTree = slots[parallelRouteKey];
            const childResult = readRenderSnapshotFromCache(now, routeCacheKey, childTree);
            childRouterStates[parallelRouteKey] = childResult.flightRouterState;
            childSeedDatas[parallelRouteKey] = childResult.seedData;
        }
    }
    // Defaults for a cache miss: no data, and assumed partial so the dynamic
    // request is not skipped.
    let rsc = null;
    let loading = null;
    let isPartial = true;
    const segmentEntry = (0, _cache.readSegmentCacheEntry)(now, routeCacheKey, tree.key);
    if (segmentEntry !== null) {
        switch(segmentEntry.status){
            case _cache.EntryStatus.Fulfilled:
                {
                    // Happy path: a cache hit
                    rsc = segmentEntry.rsc;
                    loading = segmentEntry.loading;
                    isPartial = segmentEntry.isPartial;
                    break;
                }
            case _cache.EntryStatus.Pending:
                {
                    // We haven't received data for this segment yet, but there's already
                    // an in-progress request. Since it's extremely likely to arrive
                    // before the dynamic data response, we might as well use it.
                    const promiseForFulfilledEntry = (0, _cache.waitForSegmentCacheEntry)(segmentEntry);
                    rsc = promiseForFulfilledEntry.then((entry)=>entry !== null ? entry.rsc : null);
                    loading = promiseForFulfilledEntry.then((entry)=>entry !== null ? entry.loading : null);
                    // Since we don't know yet whether the segment is partial or fully
                    // static, we must assume it's partial; we can't skip the
                    // dynamic request.
                    isPartial = true;
                    break;
                }
            case _cache.EntryStatus.Empty:
            case _cache.EntryStatus.Rejected:
                // No usable data; keep the defaults above.
                break;
            default:
                // Exhaustiveness residue compiled from the TypeScript source;
                // intentionally a no-op expression at runtime.
                segmentEntry;
        }
    }
    // If this is the page segment, search params would normally be
    const segment = tree.segment === _segment.PAGE_SEGMENT_KEY && routeCacheKey.search ? // included in the segment. However, the Segment Cache tracks search
    // params separately from the rest of the segment key. So we need to
    // add them back here.
    //
    // See corresponding comment in convertFlightRouterStateToTree.
    //
    // TODO: What we should do instead is update the navigation diffing
    // logic to compare search params explicitly. This is a temporary
    // solution until more of the Segment Cache implementation has settled.
    (0, _segment.addSearchParamsIfPageSegment)(tree.segment, Object.fromEntries(new URLSearchParams(routeCacheKey.search))) : tree.segment;
    return {
        flightRouterState: [
            segment,
            childRouterStates,
            null,
            null,
            tree.isRootLayout
        ],
        seedData: [
            segment,
            rsc,
            childSeedDatas,
            loading,
            isPartial
        ]
    };
}
// Navigation path taken when no cached prefetch exists for the target route.
// Issues a single dynamic request and reuses its router-state patch as a
// simulated "prefetch tree" so the rest of the flow matches the happy path.
async function navigateDynamicallyWithNoPrefetch(now, url, nextUrl, isSamePageNavigation, currentCacheNode, currentFlightRouterState, shouldScroll, hash) {
    // Runs when a navigation happens but there's no cached prefetch we can use.
    // Don't bother to wait for a prefetch response; go straight to a full
    // navigation that contains both static and dynamic data in a single stream.
    // (This is unlike the old navigation implementation, which instead blocks
    // the dynamic request until a prefetch request is received.)
    //
    // To avoid duplication of logic, we're going to pretend that the tree
    // returned by the dynamic request is, in fact, a prefetch tree. Then we can
    // use the same server response to write the actual data into the CacheNode
    // tree. So it's the same flow as the "happy path" (prefetch, then
    // navigation), except we use a single server response for both stages.
    const promiseForDynamicServerResponse = (0, _fetchserverresponse.fetchServerResponse)(url, {
        flightRouterState: currentFlightRouterState,
        nextUrl
    });
    const { flightData, canonicalUrl: canonicalUrlOverride } = await promiseForDynamicServerResponse;
    if (typeof flightData === 'string') {
        // This is an MPA navigation.
        const newUrl = flightData;
        return {
            tag: _segmentcache.NavigationResultTag.MPA,
            data: newUrl
        };
    }
    // Since the response format of dynamic requests and prefetches is slightly
    // different, we'll need to massage the data a bit. Create FlightRouterState
    // tree that simulates what we'd receive as the result of a prefetch.
    const prefetchFlightRouterState = simulatePrefetchTreeUsingDynamicTreePatch(currentFlightRouterState, flightData);
    // In our simulated prefetch payload, we pretend that there's no seed data
    // nor a prefetch head.
    const prefetchSeedData = null;
    const prefetchHead = null;
    const isPrefetchHeadPartial = true;
    const canonicalUrl = (0, _createhreffromurl.createHrefFromUrl)(canonicalUrlOverride ? canonicalUrlOverride : url);
    // Now we proceed exactly as we would for normal navigation.
    const scrollableSegments = [];
    const task = (0, _pprnavigations.startPPRNavigation)(now, currentCacheNode, currentFlightRouterState, prefetchFlightRouterState, prefetchSeedData, prefetchHead, isPrefetchHeadPartial, isSamePageNavigation, scrollableSegments);
    if (task !== null) {
        // In this case, we've already sent the dynamic request, so we don't
        // actually use the request tree created by `startPPRNavigation`,
        // except to check if it contains dynamic holes.
        //
        // This is almost always true, but it could be false if all the segment data
        // was present in the cache, but the route tree was not. E.g. navigating
        // to a URL that was not prefetched but rewrites to a different URL
        // that was.
        const hasDynamicHoles = task.dynamicRequestTree !== null;
        if (hasDynamicHoles) {
            (0, _pprnavigations.listenForDynamicRequest)(task, promiseForDynamicServerResponse);
        } else {
        // The prefetched tree does not contain dynamic holes — it's
        // fully static. We don't need to process the server response further.
        }
        return navigationTaskToResult(task, currentCacheNode, canonicalUrl, scrollableSegments, shouldScroll, hash);
    }
    // The server sent back an empty tree patch. There's nothing to update, except
    // possibly the URL.
    return {
        tag: _segmentcache.NavigationResultTag.NoOp,
        data: {
            canonicalUrl,
            shouldScroll
        }
    };
}
// Applies the router-state patches received from the server onto the current
// FlightRouterState, producing a full tree we can pretend was returned by a
// prefetch. (Similar in spirit to applyRouterStatePatch, but it doesn't need
// to handle interception routes or diffing — that happens later.)
function simulatePrefetchTreeUsingDynamicTreePatch(currentTree, flightData) {
    let patchedTree = currentTree;
    for (const { segmentPath, tree: treePatch } of flightData){
        // Only the first patch needs to clone the base tree; once we hold a
        // clone, later patches may mutate it in place without copying.
        const alreadyCloned = patchedTree !== currentTree;
        patchedTree = simulatePrefetchTreeUsingDynamicTreePatchImpl(patchedTree, treePatch, segmentPath, alreadyCloned, 0);
    }
    return patchedTree;
}
function simulatePrefetchTreeUsingDynamicTreePatchImpl(baseRouterState, patch, segmentPath, canMutateInPlace, index) {
if (index === segmentPath.length) {
// We reached the part of the tree that we need to patch.
return patch;
}
// segmentPath represents the parent path of subtree. It's a repeating
// pattern of parallel route key and segment:
//
// [string, Segment, string, Segment, string, Segment, ...]
//
// This path tells us which part of the base tree to apply the tree patch.
//
// NOTE: In the case of a fully dynamic request with no prefetch, we receive
// the FlightRouterState patch in the same request as the dynamic data.
// Therefore we don't need to worry about diffing the segment values; we can
// assume the server sent us a correct result.
const updatedParallelRouteKey = segmentPath[index];
// const segment: Segment = segmentPath[index + 1] <-- Not used, see note above
const baseChildren = baseRouterState[1];
const newChildren = {};
for(const parallelRouteKey in baseChildren){
if (parallelRouteKey === updatedParallelRouteKey) {
const childBaseRouterState = baseChildren[parallelRouteKey];
newChildren[parallelRouteKey] = simulatePrefetchTreeUsingDynamicTreePatchImpl(childBaseRouterState, patch, segmentPath, canMutateInPlace, // Advance the index by two and keep cloning until we reach
// the end of the segment path.
index + 2);
} else {
// This child is not being patched. Copy it over as-is.
newChildren[parallelRouteKey] = baseChildren[parallelRouteKey];
}
}
if (canMutateInPlace) {
// We can mutate the base tree in place, because the base tree is already
// a clone.
baseRouterState[1] = newChildren;
return baseRouterState;
}
// Clone all the fields except the children.
//
// Based on equivalent logic in apply-router-state-patch-to-tree, but should
// confirm whether we need to copy all of these fields. Not sure the server
// ever sends, e.g. the refetch marker.
const clone = [
baseRouterState[0],
newChildren
];
if (2 in baseRouterState) {
clone[2] = baseRouterState[2];
}
if (3 in baseRouterState) {
clone[3] = baseRouterState[3];
}
if (4 in baseRouterState) {
clone[4] = baseRouterState[4];
}
return clone;
}
// Generated interop footer: if the module ended up with a callable/object
// default export lacking its own __esModule flag, merge the named exports
// onto it and make it the module object, so both require() styles work.
if ((typeof exports.default === 'function' || (typeof exports.default === 'object' && exports.default !== null)) && typeof exports.default.__esModule === 'undefined') {
    Object.defineProperty(exports.default, '__esModule', { value: true });
    Object.assign(exports.default, exports);
    module.exports = exports.default;
}
//# sourceMappingURL=navigation.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,13 @@
import type { FlightRouterState } from '../../../server/app-render/types';
/**
 * Entrypoint for prefetching a URL into the Segment Cache.
 *
 * Hrefs that fail validation are ignored — no prefetch task is scheduled.
 *
 * @param href - The URL to prefetch. Typically this will come from a <Link>,
 * or router.prefetch. It must be validated before we attempt to prefetch it.
 * @param nextUrl - A special header used by the server for interception routes.
 * Roughly corresponds to the current URL.
 * @param treeAtTimeOfPrefetch - The FlightRouterState at the time the prefetch
 * was requested. This is only used when PPR is disabled.
 * @param includeDynamicData - Whether to prefetch dynamic data, in addition to
 * static data. This is used by <Link prefetch={true}>.
 */
export declare function prefetch(href: string, nextUrl: string | null, treeAtTimeOfPrefetch: FlightRouterState, includeDynamicData: boolean): void;

View File

@@ -0,0 +1,31 @@
"use strict";
// Generated CommonJS interop (compiler output): mark the module as an ES
// module and expose `prefetch` through a live getter.
Object.defineProperty(exports, "__esModule", {
    value: true
});
Object.defineProperty(exports, "prefetch", {
    enumerable: true,
    get: function() {
        return prefetch;
    }
});
const _approuter = require("../app-router");
const _cachekey = require("./cache-key");
const _scheduler = require("./scheduler");
const _segmentcache = require("../segment-cache");
// Entrypoint for prefetching a URL into the Segment Cache: validates the
// href, derives its cache key, and schedules a default-priority prefetch task.
function prefetch(href, nextUrl, treeAtTimeOfPrefetch, includeDynamicData) {
    // createPrefetchURL returns null when this href should not be prefetched.
    const prefetchUrl = (0, _approuter.createPrefetchURL)(href);
    if (prefetchUrl === null) {
        return;
    }
    const key = (0, _cachekey.createCacheKey)(prefetchUrl.href, nextUrl);
    (0, _scheduler.schedulePrefetchTask)(key, treeAtTimeOfPrefetch, includeDynamicData, _segmentcache.PrefetchPriority.Default);
}
// Generated interop footer: if the module ended up with a callable/object
// default export lacking its own __esModule flag, merge the named exports
// onto it and make it the module object, so both require() styles work.
if ((typeof exports.default === 'function' || (typeof exports.default === 'object' && exports.default !== null)) && typeof exports.default.__esModule === 'undefined') {
    Object.defineProperty(exports.default, '__esModule', { value: true });
    Object.assign(exports.default, exports);
    module.exports = exports.default;
}
//# sourceMappingURL=prefetch.js.map

View File

@@ -0,0 +1 @@
{"version":3,"sources":["../../../../src/client/components/segment-cache-impl/prefetch.ts"],"sourcesContent":["import type { FlightRouterState } from '../../../server/app-render/types'\nimport { createPrefetchURL } from '../app-router'\nimport { createCacheKey } from './cache-key'\nimport { schedulePrefetchTask } from './scheduler'\nimport { PrefetchPriority } from '../segment-cache'\n\n/**\n * Entrypoint for prefetching a URL into the Segment Cache.\n * @param href - The URL to prefetch. Typically this will come from a <Link>,\n * or router.prefetch. It must be validated before we attempt to prefetch it.\n * @param nextUrl - A special header used by the server for interception routes.\n * Roughly corresponds to the current URL.\n * @param treeAtTimeOfPrefetch - The FlightRouterState at the time the prefetch\n * was requested. This is only used when PPR is disabled.\n * @param includeDynamicData - Whether to prefetch dynamic data, in addition to\n * static data. This is used by <Link prefetch={true}>.\n */\nexport function prefetch(\n href: string,\n nextUrl: string | null,\n treeAtTimeOfPrefetch: FlightRouterState,\n includeDynamicData: boolean\n) {\n const url = createPrefetchURL(href)\n if (url === null) {\n // This href should not be prefetched.\n return\n }\n const cacheKey = createCacheKey(url.href, nextUrl)\n schedulePrefetchTask(\n cacheKey,\n treeAtTimeOfPrefetch,\n includeDynamicData,\n PrefetchPriority.Default\n )\n}\n"],"names":["prefetch","href","nextUrl","treeAtTimeOfPrefetch","includeDynamicData","url","createPrefetchURL","cacheKey","createCacheKey","schedulePrefetchTask","PrefetchPriority","Default"],"mappings":";;;;+BAiBgBA;;;eAAAA;;;2BAhBkB;0BACH;2BACM;8BACJ;AAa1B,SAASA,SACdC,IAAY,EACZC,OAAsB,EACtBC,oBAAuC,EACvCC,kBAA2B;IAE3B,MAAMC,MAAMC,IAAAA,4BAAiB,EAACL;IAC9B,IAAII,QAAQ,MAAM;QAChB,sCAAsC;QACtC;IACF;IACA,MAAME,WAAWC,IAAAA,wBAAc,EAACH,IAAIJ,IAAI,EAAEC;IAC1CO,IAAAA,+BAAoB,EAClBF,UACAJ,sBACAC,oBACAM,8BAAgB,CAACC,OAAO;AAE5B"}

View File

@@ -0,0 +1,109 @@
import type { FlightRouterState } from '../../../server/app-render/types';
import type { RouteCacheKey } from './cache-key';
import { PrefetchPriority } from '../segment-cache';
export type PrefetchTask = {
    key: RouteCacheKey;
    /**
     * The FlightRouterState at the time the task was initiated. This is needed
     * when falling back to the non-PPR behavior, which only prefetches up to
     * the first loading boundary.
     */
    treeAtTimeOfPrefetch: FlightRouterState;
    /**
     * Whether to prefetch dynamic data, in addition to static data. This is
     * used by <Link prefetch={true}>.
     */
    includeDynamicData: boolean;
    /**
     * sortId is an incrementing counter
     *
     * Newer prefetches are prioritized over older ones, so that as new links
     * enter the viewport, they are not starved by older links that are no
     * longer relevant. In the future, we can add additional prioritization
     * heuristics, like removing prefetches once a link leaves the viewport.
     *
     * The sortId is assigned when the prefetch is initiated, and reassigned if
     * the same task is prefetched again (effectively bumping it to the top of
     * the queue).
     *
     * TODO: We can add additional fields here to indicate what kind of prefetch
     * it is. For example, was it initiated by a link? Or was it an imperative
     * call? If it was initiated by a link, we can remove it from the queue when
     * the link leaves the viewport, but if it was an imperative call, then we
     * should keep it in the queue until it's fulfilled.
     *
     * We can also add priority levels. For example, hovering over a link could
     * increase the priority of its prefetch.
     */
    sortId: number;
    /**
     * The priority of the task. Like sortId, this affects the task's position in
     * the queue, so it must never be updated without resifting the heap.
     */
    priority: PrefetchPriority;
    /**
     * The phase of the task. Tasks are split into multiple phases so that their
     * priority can be adjusted based on what kind of work they're doing.
     * Concretely, prefetching the route tree is higher priority than prefetching
     * segment data.
     */
    phase: PrefetchPhase;
    /**
     * Temporary state for tracking the currently running task. This is currently
     * used to track whether a task deferred some work to run at background
     * priority, but we might need it for additional state in the future.
     */
    hasBackgroundWork: boolean;
    /**
     * True if the prefetch was cancelled.
     */
    isCanceled: boolean;
    /**
     * The index of the task in the heap's backing array. Used to efficiently
     * change the priority of a task by re-sifting it, which requires knowing
     * where it is in the array. This is only used internally by the heap
     * algorithm. The naive alternative is indexOf every time a task is queued,
     * which has O(n) complexity.
     *
     * We also use this field to check whether a task is currently in the queue.
     */
    _heapIndex: number;
};
/**
 * Prefetch tasks are processed in two phases: first the route tree is fetched,
 * then the segments. We use this to prioritize tasks that have not yet fetched
 * the route tree.
 */
declare const enum PrefetchPhase {
    RouteTree = 1,
    Segments = 0
}
export type PrefetchSubtaskResult<T> = {
    /**
     * A promise that resolves when the network connection is closed.
     */
    closed: Promise<void>;
    value: T;
};
/**
 * Initiates a prefetch task for the given URL. If a prefetch for the same URL
 * is already in progress, this will bump it to the top of the queue.
 *
 * This is not a user-facing function. By the time this is called, the href is
 * expected to be validated and normalized.
 *
 * @param key The RouteCacheKey to prefetch.
 * @param treeAtTimeOfPrefetch The app's current FlightRouterState
 * @param includeDynamicData Whether to prefetch dynamic data, in addition to
 * static data. This is used by <Link prefetch={true}>.
 */
export declare function schedulePrefetchTask(key: RouteCacheKey, treeAtTimeOfPrefetch: FlightRouterState, includeDynamicData: boolean, priority: PrefetchPriority): PrefetchTask;
/**
 * Removes a task from the queue (no-op if it already completed) and marks it
 * canceled so a later network ping cannot re-enqueue it.
 */
export declare function cancelPrefetchTask(task: PrefetchTask): void;
/**
 * Bumps an existing task to the top of the queue as if it were freshly
 * scheduled, reusing the task object. Un-cancels the task if it was
 * previously canceled. Used e.g. to raise the priority of a Link-initiated
 * prefetch on hover.
 */
export declare function reschedulePrefetchTask(task: PrefetchTask, treeAtTimeOfPrefetch: FlightRouterState, includeDynamicData: boolean, priority: PrefetchPriority): void;
/**
 * Notify the scheduler that we've received new data for an in-progress
 * prefetch. The corresponding task will be added back to the queue (unless the
 * task has been canceled in the meantime).
 */
export declare function pingPrefetchTask(task: PrefetchTask): void;
export {};

View File

@@ -0,0 +1,839 @@
"use strict";
Object.defineProperty(exports, "__esModule", {
    value: true
});
// Bundler hint only: `0 &&` makes this dead code at runtime, but static
// analyzers can still see the full list of named exports.
0 && (module.exports = {
    cancelPrefetchTask: null,
    pingPrefetchTask: null,
    reschedulePrefetchTask: null,
    schedulePrefetchTask: null
});
// Defines enumerable lazy getters on `exports` for each named export, so the
// bindings stay live (the functions are hoisted below).
function _export(target, all) {
    for(var name in all)Object.defineProperty(target, name, {
        enumerable: true,
        get: all[name]
    });
}
_export(exports, {
    cancelPrefetchTask: function() {
        return cancelPrefetchTask;
    },
    pingPrefetchTask: function() {
        return pingPrefetchTask;
    },
    reschedulePrefetchTask: function() {
        return reschedulePrefetchTask;
    },
    schedulePrefetchTask: function() {
        return schedulePrefetchTask;
    }
});
const _matchsegments = require("../match-segments");
const _cache = require("./cache");
const _segmentcache = require("../segment-cache");
// Prefer native queueMicrotask; otherwise emulate it with a resolved promise,
// rethrowing any error from a macrotask so it isn't silently swallowed.
const scheduleMicrotask = typeof queueMicrotask === 'function' ? queueMicrotask : (fn)=>Promise.resolve().then(fn).catch((error)=>setTimeout(()=>{
        throw error;
    }));
// Binary heap of pending prefetch tasks, ordered by compareQueuePriority.
const taskHeap = [];
// This is intentionally low so that when a navigation happens, the browser's
// internal network queue is not already saturated with prefetch requests.
const MAX_CONCURRENT_PREFETCH_REQUESTS = 3;
// Count of prefetch connections currently open (see spawnPrefetchSubtask).
let inProgressRequests = 0;
// Monotonic counter used to break priority ties in favor of newer tasks.
let sortIdCounter = 0;
// True while a processQueueInMicrotask flush is pending.
let didScheduleMicrotask = false;
// Creates a new prefetch task, enqueues it, and kicks off (batched) queue
// processing. Returns the task so callers can cancel/reschedule it later.
function schedulePrefetchTask(key, treeAtTimeOfPrefetch, includeDynamicData, priority) {
    // Phase 1 (RouteTree) always runs first; a fresh sortId wins ties among
    // tasks of equal priority so newer prefetches are processed first.
    const newTask = {
        key,
        treeAtTimeOfPrefetch,
        priority,
        phase: 1,
        hasBackgroundWork: false,
        includeDynamicData,
        sortId: sortIdCounter++,
        isCanceled: false,
        _heapIndex: -1
    };
    heapPush(taskHeap, newTask);
    // Queue processing is deferred to a microtask: a single JS task/event
    // commonly triggers several prefetches, and batching them means the queue
    // is flushed once per JS task, in the optimal priority order.
    ensureWorkIsScheduled();
    return newTask;
}
// Cancels a prefetch. Marking `isCanceled` is required in addition to the
// heap removal: a blocked task is not in the queue, and the flag prevents a
// later network ping from re-enqueuing it. If the task already completed,
// the heap removal is a no-op.
function cancelPrefetchTask(task) {
    task.isCanceled = true;
    heapDelete(taskHeap, task);
}
// Re-primes an existing task as if it were freshly scheduled, reusing the
// object — effectively cancel + schedule. The primary use case is raising
// the priority of a Link-initiated prefetch on hover.
function reschedulePrefetchTask(task, treeAtTimeOfPrefetch, includeDynamicData, priority) {
    // Clear any earlier cancellation and restart from the route-tree phase.
    task.isCanceled = false;
    task.phase = 1;
    // A fresh (larger) sortId places it ahead of all other tasks at the same
    // priority level, since higher sort IDs are processed first.
    task.sortId = sortIdCounter++;
    task.priority = priority;
    task.treeAtTimeOfPrefetch = treeAtTimeOfPrefetch;
    task.includeDynamicData = includeDynamicData;
    if (task._heapIndex === -1) {
        // Not currently queued; add it.
        heapPush(taskHeap, task);
    } else {
        // Already queued; re-sift it to its new position.
        heapResift(taskHeap, task);
    }
    ensureWorkIsScheduled();
}
// Schedules a queue flush unless one is already pending or the network is
// saturated. In the saturated case, processing resumes automatically when a
// prefetch connection closes and frees up bandwidth.
function ensureWorkIsScheduled() {
    if (!didScheduleMicrotask && hasNetworkBandwidth()) {
        didScheduleMicrotask = true;
        scheduleMicrotask(processQueueInMicrotask);
    }
}
/**
 * Cooperative concurrency limit. Prefetch tasks consult this before issuing
 * new requests so the browser's internal network queue is not saturated with
 * prefetch traffic.
 */ function hasNetworkBandwidth() {
    // TODO: Also check if there's an in-progress navigation. We should never
    // add prefetch requests to the network queue if an actual navigation is
    // taking place, to ensure there's sufficient bandwidth for render-blocking
    // data and resources.
    return MAX_CONCURRENT_PREFETCH_REQUESTS > inProgressRequests;
}
// Fire-and-forget wrapper around an async prefetch subtask. The subtask
// writes its result directly into the cache and pings the scheduler itself,
// so its resolution value is not awaited here. Because responses are
// processed streamingly, the subtask may resolve before the network stream
// finishes; bandwidth accounting therefore waits on `result.closed` rather
// than the subtask promise itself.
function spawnPrefetchSubtask(prefetchSubtask) {
    inProgressRequests++;
    return prefetchSubtask.then((result)=>{
        if (result !== null) {
            // Free up bandwidth only once the connection actually closes.
            result.closed.then(onPrefetchConnectionClosed);
            return result.value;
        }
        // The subtask errored before it could start processing the network
        // stream. Treat the connection as already closed.
        onPrefetchConnectionClosed();
        return null;
    });
}
// A prefetch connection closed: release its bandwidth slot and let the
// scheduler resume processing queued tasks.
function onPrefetchConnectionClosed() {
    inProgressRequests -= 1;
    ensureWorkIsScheduled();
}
// Wakes a blocked prefetch when new data arrives for it. Canceled tasks and
// tasks already sitting in the queue are ignored.
function pingPrefetchTask(task) {
    const alreadyQueued = task._heapIndex !== -1;
    if (task.isCanceled || alreadyQueued) {
        return;
    }
    // Re-enqueue the task and make sure a flush is pending.
    heapPush(taskHeap, task);
    ensureWorkIsScheduled();
}
// Drains the prefetch task queue. Runs as a microtask so that all prefetches
// scheduled within one JS task are batched into a single pass.
// pingRootRouteTree exit statuses: 0 = yielded (out of network bandwidth),
// 1 = blocked on server data, 2 = done with the current phase.
function processQueueInMicrotask() {
    didScheduleMicrotask = false;
    // We aim to minimize how often we read the current time. Since nearly all
    // functions in the prefetch scheduler are synchronous, we can read the time
    // once and pass it as an argument wherever it's needed.
    const now = Date.now();
    // Process the task queue until we run out of network bandwidth.
    let task = heapPeek(taskHeap);
    while(task !== null && hasNetworkBandwidth()){
        const route = (0, _cache.readOrCreateRouteCacheEntry)(now, task);
        const exitStatus = pingRootRouteTree(now, task, route);
        // The `hasBackgroundWork` field is only valid for a single attempt. Reset
        // it immediately upon exit.
        const hasBackgroundWork = task.hasBackgroundWork;
        task.hasBackgroundWork = false;
        switch(exitStatus){
            case 0:
                // The task yielded because there are too many requests in progress.
                // Stop processing tasks until we have more bandwidth.
                return;
            case 1:
                // The task is blocked. It needs more data before it can proceed.
                // Keep the task out of the queue until the server responds.
                heapPop(taskHeap);
                // Continue to the next task
                task = heapPeek(taskHeap);
                continue;
            case 2:
                // Phase 1 = RouteTree, phase 0 = Segments (see PrefetchPhase).
                if (task.phase === 1) {
                    // Finished prefetching the route tree. Proceed to prefetching
                    // the segments.
                    task.phase = 0;
                    heapResift(taskHeap, task);
                } else if (hasBackgroundWork) {
                    // The task spawned additional background work. Reschedule the task
                    // at background priority.
                    task.priority = _segmentcache.PrefetchPriority.Background;
                    heapResift(taskHeap, task);
                } else {
                    // The prefetch is complete. Continue to the next task.
                    heapPop(taskHeap);
                }
                task = heapPeek(taskHeap);
                continue;
            default:
                // Exhaustiveness check (no-op at runtime).
                exitStatus;
        }
    }
}
/**
 * Check this during a prefetch task to determine if background work can be
 * performed. Evaluates to `true` only when the task is already running at
 * background priority. Otherwise it returns `false` and flags the task so the
 * scheduler requeues it at background priority after the current attempt.
 *
 * @example
 * if (background(task)) {
 *   // Perform background-pri work
 * }
 */ function background(task) {
    if (task.priority !== _segmentcache.PrefetchPriority.Background) {
        // Defer: remember that background work was requested for this attempt.
        task.hasBackgroundWork = true;
        return false;
    }
    return true;
}
// Advances a prefetch task starting from its route cache entry. Returns an
// exit status: 0 = yielded (out of network bandwidth), 1 = blocked until the
// route tree response arrives, 2 = finished the current phase.
// NOTE: the Empty case intentionally falls through into Pending.
function pingRootRouteTree(now, task, route) {
    switch(route.status){
        case _cache.EntryStatus.Empty:
            {
                // Route is not yet cached, and there's no request already in progress.
                // Spawn a task to request the route, load it into the cache, and ping
                // the task to continue.
                // TODO: There are multiple strategies in the <Link> API for prefetching
                // a route. Currently we've only implemented the main one: per-segment,
                // static-data only.
                //
                // There's also <Link prefetch={true}> which prefetches both static *and*
                // dynamic data. Similarly, we need to fallback to the old, per-page
                // behavior if PPR is disabled for a route (via the incremental opt-in).
                //
                // Those cases will be handled here.
                spawnPrefetchSubtask((0, _cache.fetchRouteOnCacheMiss)(route, task));
                // If the request takes longer than a minute, a subsequent request should
                // retry instead of waiting for this one. When the response is received,
                // this value will be replaced by a new value based on the stale time sent
                // from the server.
                // TODO: We should probably also manually abort the fetch task, to reclaim
                // server bandwidth.
                route.staleAt = now + 60 * 1000;
                // Upgrade to Pending so we know there's already a request in progress
                route.status = _cache.EntryStatus.Pending;
            // Intentional fallthrough to the Pending branch
            }
        case _cache.EntryStatus.Pending:
            {
                // Still pending. We can't start prefetching the segments until the route
                // tree has loaded. Add the task to the set of blocked tasks so that it
                // is notified when the route tree is ready.
                const blockedTasks = route.blockedTasks;
                if (blockedTasks === null) {
                    route.blockedTasks = new Set([
                        task
                    ]);
                } else {
                    blockedTasks.add(task);
                }
                return 1;
            }
        case _cache.EntryStatus.Rejected:
            {
                // Route tree failed to load. Treat as a 404.
                return 2;
            }
        case _cache.EntryStatus.Fulfilled:
            {
                if (task.phase !== 0) {
                    // Do not prefetch segment data until we've entered the segment phase.
                    return 2;
                }
                // Recursively fill in the segment tree.
                if (!hasNetworkBandwidth()) {
                    // Stop prefetching segments until there's more bandwidth.
                    return 0;
                }
                const tree = route.tree;
                // Determine which fetch strategy to use for this prefetch task.
                const fetchStrategy = task.includeDynamicData ? _cache.FetchStrategy.Full : route.isPPREnabled ? _cache.FetchStrategy.PPR : _cache.FetchStrategy.LoadingBoundary;
                switch(fetchStrategy){
                    case _cache.FetchStrategy.PPR:
                        // Individually prefetch the static shell for each segment. This is
                        // the default prefetching behavior for static routes, or when PPR is
                        // enabled. It will not include any dynamic data.
                        return pingPPRRouteTree(now, task, route, tree);
                    case _cache.FetchStrategy.Full:
                    case _cache.FetchStrategy.LoadingBoundary:
                        {
                            // Prefetch multiple segments using a single dynamic request.
                            const spawnedEntries = new Map();
                            const dynamicRequestTree = diffRouteTreeAgainstCurrent(now, task, route, task.treeAtTimeOfPrefetch, tree, spawnedEntries, fetchStrategy);
                            const needsDynamicRequest = spawnedEntries.size > 0;
                            if (needsDynamicRequest) {
                                // Perform a dynamic prefetch request and populate the cache with
                                // the result
                                spawnPrefetchSubtask((0, _cache.fetchSegmentPrefetchesUsingDynamicRequest)(task, route, fetchStrategy, dynamicRequestTree, spawnedEntries));
                            }
                            return 2;
                        }
                    default:
                        // Exhaustiveness check (no-op at runtime).
                        fetchStrategy;
                }
                break;
            }
        default:
            {
                // Exhaustiveness check (no-op at runtime).
                route;
            }
    }
    return 2;
}
// Recursively prefetches each segment in the tree individually (the PPR
// strategy). Returns 0 when it ran out of network bandwidth mid-traversal,
// or 2 once this segment and all of its children have been pinged.
function pingPPRRouteTree(now, task, route, tree) {
    const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task, route, tree.key);
    pingPerSegment(now, task, route, segment, task.key, tree.key);
    const slots = tree.slots;
    if (slots !== null) {
        if (!hasNetworkBandwidth()) {
            // Out of bandwidth; yield before descending into the children.
            return 0;
        }
        for(const parallelRouteKey in slots){
            const childStatus = pingPPRRouteTree(now, task, route, slots[parallelRouteKey]);
            if (childStatus === 0) {
                // A child yielded without finishing; propagate the yield.
                return 0;
            }
        }
    }
    return 2;
}
// Diffs the target route tree against the current one and builds the request
// tree (FlightRouterState) for a single dynamic prefetch request, spawning
// pending cache entries (via `spawnedEntries`) for segments that need data.
function diffRouteTreeAgainstCurrent(now, task, route, oldTree, newTree, spawnedEntries, fetchStrategy) {
    // This is a single recursive traversal that does multiple things:
    // - Finds the parts of the target route (newTree) that are not part of
    //   of the current page (oldTree) by diffing them, using the same algorithm
    //   as a real navigation.
    // - Constructs a request tree (FlightRouterState) that describes which
    //   segments need to be prefetched and which ones are already cached.
    // - Creates a set of pending cache entries for the segments that need to
    //   be prefetched, so that a subsequent prefetch task does not request the
    //   same segments again.
    // FlightRouterState is a tuple: index 0 is the segment, index 1 the
    // children keyed by parallel route.
    const oldTreeChildren = oldTree[1];
    const newTreeChildren = newTree.slots;
    let requestTreeChildren = {};
    if (newTreeChildren !== null) {
        for(const parallelRouteKey in newTreeChildren){
            const newTreeChild = newTreeChildren[parallelRouteKey];
            const newTreeChildSegment = newTreeChild.segment;
            const oldTreeChild = oldTreeChildren[parallelRouteKey];
            const oldTreeChildSegment = oldTreeChild == null ? void 0 : oldTreeChild[0];
            if (oldTreeChildSegment !== undefined && (0, _matchsegments.matchSegment)(newTreeChildSegment, oldTreeChildSegment)) {
                // This segment is already part of the current route. Keep traversing.
                const requestTreeChild = diffRouteTreeAgainstCurrent(now, task, route, oldTreeChild, newTreeChild, spawnedEntries, fetchStrategy);
                requestTreeChildren[parallelRouteKey] = requestTreeChild;
            } else {
                // This segment is not part of the current route. We're entering a
                // part of the tree that we need to prefetch (unless everything is
                // already cached).
                switch(fetchStrategy){
                    case _cache.FetchStrategy.LoadingBoundary:
                        {
                            // When PPR is disabled, we can't prefetch per segment. We must
                            // fallback to the old prefetch behavior and send a dynamic request.
                            // Only routes that include a loading boundary can be prefetched in
                            // this way.
                            //
                            // This is similar to a "full" prefetch, but we're much more
                            // conservative about which segments to include in the request.
                            //
                            // The server will only render up to the first loading boundary
                            // inside new part of the tree. If there's no loading boundary, the
                            // server will never return any data. TODO: When we prefetch the
                            // route tree, the server should indicate whether there's a loading
                            // boundary so the client doesn't send a second request for no
                            // reason.
                            const requestTreeChild = pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, newTreeChild, null, spawnedEntries);
                            requestTreeChildren[parallelRouteKey] = requestTreeChild;
                            break;
                        }
                    case _cache.FetchStrategy.Full:
                        {
                            // This is a "full" prefetch. Fetch all the data in the tree, both
                            // static and dynamic. We issue roughly the same request that we
                            // would during a real navigation. The goal is that once the
                            // navigation occurs, the router should not have to fetch any
                            // additional data.
                            //
                            // Although the response will include dynamic data, opting into a
                            // Full prefetch — via <Link prefetch={true}> — implicitly
                            // instructs the cache to treat the response as "static", or non-
                            // dynamic, since the whole point is to cache it for
                            // future navigations.
                            //
                            // Construct a tree (currently a FlightRouterState) that represents
                            // which segments need to be prefetched and which ones are already
                            // cached. If the tree is empty, then we can exit. Otherwise, we'll
                            // send the request tree to the server and use the response to
                            // populate the segment cache.
                            const requestTreeChild = pingRouteTreeAndIncludeDynamicData(now, task, route, newTreeChild, false, spawnedEntries);
                            requestTreeChildren[parallelRouteKey] = requestTreeChild;
                            break;
                        }
                    default:
                        // Exhaustiveness check (no-op at runtime).
                        fetchStrategy;
                }
            }
        }
    }
    const requestTree = [
        newTree.segment,
        requestTreeChildren,
        null,
        null,
        newTree.isRootLayout
    ];
    return requestTree;
}
// Builds the request tree for the non-PPR (loading-boundary) strategy,
// spawning pending cache entries for uncached segments and bailing out once
// a cached segment with a loading boundary is found.
function pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, tree, refetchMarkerContext, spawnedEntries) {
    // This function is similar to pingRouteTreeAndIncludeDynamicData, except the
    // server is only going to return a minimal loading state — it will stop
    // rendering at the first loading boundary. Whereas a Full prefetch is
    // intentionally aggressive and tries to prefetch all the data that will be
    // needed for a navigation, a LoadingBoundary prefetch is much more
    // conservative. For example, it will omit from the request tree any segment
    // that is already cached, regardless of whether it's partial or full. By
    // contrast, a Full prefetch will refetch partial segments.
    // "inside-shared-layout" tells the server where to start looking for a
    // loading boundary.
    let refetchMarker = refetchMarkerContext === null ? 'inside-shared-layout' : null;
    const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task, route, tree.key);
    switch(segment.status){
        case _cache.EntryStatus.Empty:
            {
                // This segment is not cached. Add a refetch marker so the server knows
                // to start rendering here.
                // TODO: Instead of a "refetch" marker, we could just omit this subtree's
                // FlightRouterState from the request tree. I think this would probably
                // already work even without any updates to the server. For consistency,
                // though, I'll send the full tree and we'll look into this later as part
                // of a larger redesign of the request protocol.
                // Add the pending cache entry to the result map.
                spawnedEntries.set(tree.key, (0, _cache.upgradeToPendingSegment)(segment, // Set the fetch strategy to LoadingBoundary to indicate that the server
                // might not include it in the pending response. If another route is able
                // to issue a per-segment request, we'll do that in the background.
                _cache.FetchStrategy.LoadingBoundary));
                if (refetchMarkerContext !== 'refetch') {
                    refetchMarker = refetchMarkerContext = 'refetch';
                } else {
                // There's already a parent with a refetch marker, so we don't need
                // to add another one.
                }
                break;
            }
        case _cache.EntryStatus.Fulfilled:
            {
                // The segment is already cached.
                // TODO: The server should include a `hasLoading` field as part of the
                // route tree prefetch.
                if (segment.loading !== null) {
                    // This segment has a loading boundary, which means the server won't
                    // render its children. So there's nothing left to prefetch along this
                    // path. We can bail out.
                    return (0, _cache.convertRouteTreeToFlightRouterState)(tree);
                }
                break;
            }
        case _cache.EntryStatus.Pending:
            {
                // A request is already in flight for this segment; keep traversing.
                break;
            }
        case _cache.EntryStatus.Rejected:
            {
                // A previous attempt failed; keep traversing the children.
                break;
            }
        default:
            // Exhaustiveness check (no-op at runtime).
            segment;
    }
    const requestTreeChildren = {};
    if (tree.slots !== null) {
        for(const parallelRouteKey in tree.slots){
            const childTree = tree.slots[parallelRouteKey];
            requestTreeChildren[parallelRouteKey] = pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, childTree, refetchMarkerContext, spawnedEntries);
        }
    }
    const requestTree = [
        tree.segment,
        requestTreeChildren,
        null,
        refetchMarker,
        tree.isRootLayout
    ];
    return requestTree;
}
// Builds the request tree for a Full prefetch (static + dynamic data),
// spawning pending cache entries for segments that must be included in the
// dynamic request.
function pingRouteTreeAndIncludeDynamicData(now, task, route, tree, isInsideRefetchingParent, spawnedEntries) {
    // The tree we're constructing is the same shape as the tree we're navigating
    // to. But even though this is a "new" tree, some of the individual segments
    // may be cached as a result of other route prefetches.
    //
    // So we need to find the first uncached segment along each path and add an
    // explicit "refetch" marker so the server knows where to start rendering.
    // Once the server starts rendering along a path, it keeps rendering the
    // entire subtree.
    const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task, route, tree.key);
    let spawnedSegment = null;
    switch(segment.status){
        case _cache.EntryStatus.Empty:
            {
                // This segment is not cached. Include it in the request.
                spawnedSegment = (0, _cache.upgradeToPendingSegment)(segment, _cache.FetchStrategy.Full);
                break;
            }
        case _cache.EntryStatus.Fulfilled:
            {
                // The segment is already cached.
                if (segment.isPartial) {
                    // The cached segment contains dynamic holes. Since this is a Full
                    // prefetch, we need to include it in the request.
                    spawnedSegment = pingFullSegmentRevalidation(now, task, route, segment, tree.key);
                }
                break;
            }
        case _cache.EntryStatus.Pending:
        case _cache.EntryStatus.Rejected:
            {
                // There's either another prefetch currently in progress, or the previous
                // attempt failed. If it wasn't a Full prefetch, fetch it again.
                if (segment.fetchStrategy !== _cache.FetchStrategy.Full) {
                    spawnedSegment = pingFullSegmentRevalidation(now, task, route, segment, tree.key);
                }
                break;
            }
        default:
            // Exhaustiveness check (no-op at runtime).
            segment;
    }
    const requestTreeChildren = {};
    if (tree.slots !== null) {
        for(const parallelRouteKey in tree.slots){
            const childTree = tree.slots[parallelRouteKey];
            requestTreeChildren[parallelRouteKey] = pingRouteTreeAndIncludeDynamicData(now, task, route, childTree, isInsideRefetchingParent || spawnedSegment !== null, spawnedEntries);
        }
    }
    if (spawnedSegment !== null) {
        // Add the pending entry to the result map.
        spawnedEntries.set(tree.key, spawnedSegment);
    }
    // Don't bother to add a refetch marker if one is already present in a parent.
    const refetchMarker = !isInsideRefetchingParent && spawnedSegment !== null ? 'refetch' : null;
    const requestTree = [
        tree.segment,
        requestTreeChildren,
        null,
        refetchMarker,
        tree.isRootLayout
    ];
    return requestTree;
}
// Prefetches a single segment using the per-segment (PPR) strategy: spawns a
// fetch for uncached entries, and revalidates Pending/Rejected entries that
// were originally fetched with the loading-boundary strategy.
function pingPerSegment(now, task, route, segment, routeKey, segmentKey) {
    switch(segment.status){
        case _cache.EntryStatus.Empty:
            // Upgrade to Pending so we know there's already a request in progress
            spawnPrefetchSubtask((0, _cache.fetchSegmentOnCacheMiss)(route, (0, _cache.upgradeToPendingSegment)(segment, _cache.FetchStrategy.PPR), routeKey, segmentKey));
            break;
        case _cache.EntryStatus.Pending:
            {
                // There's already a request in progress. Depending on what kind of
                // request it is, we may want to revalidate it.
                switch(segment.fetchStrategy){
                    case _cache.FetchStrategy.PPR:
                    case _cache.FetchStrategy.Full:
                        // The in-flight request will fulfill this segment; nothing to do.
                        break;
                    case _cache.FetchStrategy.LoadingBoundary:
                        // There's a pending request, but because it's using the old
                        // prefetching strategy, we can't be sure if it will be fulfilled by
                        // the response — it might be inside the loading boundary. Perform
                        // a revalidation, but because it's speculative, wait to do it at
                        // background priority.
                        if (background(task)) {
                            // TODO: Instead of speculatively revalidating, consider including
                            // `hasLoading` in the route tree prefetch response.
                            pingPPRSegmentRevalidation(now, task, segment, route, routeKey, segmentKey);
                        }
                        break;
                    default:
                        // Exhaustiveness check (no-op at runtime).
                        segment.fetchStrategy;
                }
                break;
            }
        case _cache.EntryStatus.Rejected:
            {
                // The existing entry in the cache was rejected. Depending on how it
                // was originally fetched, we may or may not want to revalidate it.
                switch(segment.fetchStrategy){
                    case _cache.FetchStrategy.PPR:
                    case _cache.FetchStrategy.Full:
                        // Already failed with a complete strategy; do not retry here.
                        break;
                    case _cache.FetchStrategy.LoadingBoundary:
                        // There's a rejected entry, but it was fetched using the loading
                        // boundary strategy. So the reason it wasn't returned by the server
                        // might just be because it was inside a loading boundary. Or because
                        // there was a dynamic rewrite. Revalidate it using the per-
                        // segment strategy.
                        //
                        // Because a rejected segment will definitely prevent the segment (and
                        // all of its children) from rendering, we perform this revalidation
                        // immediately instead of deferring it to a background task.
                        pingPPRSegmentRevalidation(now, task, segment, route, routeKey, segmentKey);
                        break;
                    default:
                        // Exhaustiveness check (no-op at runtime).
                        segment.fetchStrategy;
                }
                break;
            }
        case _cache.EntryStatus.Fulfilled:
            // Already cached; nothing to do.
            break;
        default:
            // Exhaustiveness check (no-op at runtime).
            segment;
    }
// Segments do not have dependent tasks, so once the prefetch is initiated,
// there's nothing else for us to do (except write the server data into the
// entry, which is handled by `fetchSegmentOnCacheMiss`).
}
// Revalidates an existing segment entry with a per-segment (PPR) request.
// Only an Empty revalidation slot starts a new request; otherwise a
// revalidation is already in flight or has already settled.
function pingPPRSegmentRevalidation(now, task, currentSegment, route, routeKey, segmentKey) {
    const revalidatingSegment = (0, _cache.readOrCreateRevalidatingSegmentEntry)(now, currentSegment);
    switch(revalidatingSegment.status){
        case _cache.EntryStatus.Empty:
            // Spawn a prefetch request and upsert the segment into the cache
            // upon completion.
            upsertSegmentOnCompletion(task, route, segmentKey, spawnPrefetchSubtask((0, _cache.fetchSegmentOnCacheMiss)(route, (0, _cache.upgradeToPendingSegment)(revalidatingSegment, _cache.FetchStrategy.PPR), routeKey, segmentKey)));
            break;
        case _cache.EntryStatus.Pending:
            // A revalidation request is already in progress; nothing to do.
            break;
        case _cache.EntryStatus.Fulfilled:
        case _cache.EntryStatus.Rejected:
            // A previous revalidation attempt already settled; don't retry here.
            break;
        default:
            // Exhaustiveness check (no-op at runtime).
            revalidatingSegment;
    }
}
// Revalidates a segment as part of a Full prefetch. Returns a pending entry
// when the segment should be included in the combined dynamic request, or
// null when an adequate revalidation is already in progress or settled.
function pingFullSegmentRevalidation(now, task, route, currentSegment, segmentKey) {
    const revalidatingSegment = (0, _cache.readOrCreateRevalidatingSegmentEntry)(now, currentSegment);
    if (revalidatingSegment.status === _cache.EntryStatus.Empty) {
        // During a Full prefetch, a single dynamic request is made for all the
        // segments that we need. So we don't initiate a request here directly. By
        // returning a pending entry from this function, it signals to the caller
        // that this segment should be included in the request that's sent to
        // the server.
        const pendingSegment = (0, _cache.upgradeToPendingSegment)(revalidatingSegment, _cache.FetchStrategy.Full);
        upsertSegmentOnCompletion(task, route, segmentKey, (0, _cache.waitForSegmentCacheEntry)(pendingSegment));
        return pendingSegment;
    } else {
        // There's already a revalidation in progress.
        const nonEmptyRevalidatingSegment = revalidatingSegment;
        if (nonEmptyRevalidatingSegment.fetchStrategy !== _cache.FetchStrategy.Full) {
            // The existing revalidation was not fetched using the Full strategy.
            // Reset it and start a new revalidation.
            const emptySegment = (0, _cache.resetRevalidatingSegmentEntry)(nonEmptyRevalidatingSegment);
            const pendingSegment = (0, _cache.upgradeToPendingSegment)(emptySegment, _cache.FetchStrategy.Full);
            upsertSegmentOnCompletion(task, route, segmentKey, (0, _cache.waitForSegmentCacheEntry)(pendingSegment));
            return pendingSegment;
        }
        switch(nonEmptyRevalidatingSegment.status){
            case _cache.EntryStatus.Pending:
                // There's already an in-progress prefetch that includes this segment.
                return null;
            case _cache.EntryStatus.Fulfilled:
            case _cache.EntryStatus.Rejected:
                // A previous revalidation attempt finished, but we chose not to replace
                // the existing entry in the cache. Don't try again until or unless the
                // revalidation entry expires.
                return null;
            default:
                // Exhaustiveness check (no-op at runtime).
                nonEmptyRevalidatingSegment;
                return null;
        }
    }
}
const noop = ()=>{};
function upsertSegmentOnCompletion(task, route, key, promise) {
    // Wait for a segment to finish loading, then upsert it into the cache.
    // Rejections are intentionally swallowed (noop): a failed prefetch simply
    // leaves the cache untouched.
    promise.then((entry)=>{
        if (entry === null) {
            // No new data was produced; leave the existing cache entry alone.
            return;
        }
        // Received new data. Attempt to replace the existing entry in
        // the cache.
        const keypath = (0, _cache.getSegmentKeypathForTask)(task, route, key);
        (0, _cache.upsertSegmentEntry)(Date.now(), keypath, entry);
    }, noop);
}
// -----------------------------------------------------------------------------
// The remainder of the module is a MinHeap implementation. Try not to put any
// logic below here unless it's related to the heap algorithm. We can extract
// this to a separate module if/when we need multiple kinds of heaps.
// -----------------------------------------------------------------------------
function compareQueuePriority(a, b) {
    // MinHeap comparator: returns a positive number when b is higher priority
    // than a, and a negative number when a is higher priority than b.
    //
    // 1. `priority` is an integer where larger numbers are more urgent.
    if (b.priority !== a.priority) {
        return b.priority - a.priority;
    }
    // 2. Same priority: compare which phase the prefetch is in — route-tree
    // fetches are prioritized over segment fetches.
    if (b.phase !== a.phase) {
        return b.phase - a.phase;
    }
    // 3. Tiebreak on insertion order. `sortId` is an incrementing counter
    // assigned to prefetches; newer prefetches are processed first.
    return b.sortId - a.sortId;
}
function heapPush(heap, node) {
    // Append the node at the end of the array, then restore the heap
    // invariant by sifting it up toward the root.
    const insertedAt = heap.length;
    node._heapIndex = insertedAt;
    heap.push(node);
    heapSiftUp(heap, node, insertedAt);
}
function heapPeek(heap) {
    // The minimum element always lives at the root (index 0); an empty heap
    // yields null.
    if (heap.length === 0) {
        return null;
    }
    return heap[0];
}
function heapPop(heap) {
    // Removes and returns the minimum element (the root), or null when the
    // heap is empty.
    if (heap.length === 0) {
        return null;
    }
    const root = heap[0];
    // Mark the removed node as no longer belonging to the heap.
    root._heapIndex = -1;
    const tail = heap.pop();
    if (tail === root) {
        // The root was the only element; the heap is now empty.
        return root;
    }
    // Move the former tail into the root slot and sift it down into place.
    heap[0] = tail;
    tail._heapIndex = 0;
    heapSiftDown(heap, tail, 0);
    return root;
}
function heapDelete(heap, node) {
    // Removes an arbitrary node from the heap (not necessarily the root).
    // No-op if the node is not currently in the heap.
    const index = node._heapIndex;
    if (index === -1) {
        return;
    }
    node._heapIndex = -1;
    if (heap.length === 0) {
        // Defensive: a stale _heapIndex on an empty heap. Nothing to remove.
        return;
    }
    const last = heap.pop();
    if (last !== node) {
        // Fill the vacated slot with the former tail element. The moved
        // element may violate the invariant in EITHER direction: it can be
        // larger than the slot's children, or smaller than the slot's parent
        // (e.g. deleting a deep node whose parent is larger than the tail).
        // Sifting down alone misses the second case, so re-sift both ways.
        heap[index] = last;
        last._heapIndex = index;
        heapResift(heap, last);
    }
}
function heapResift(heap, node) {
    // Restores the heap invariant for a node whose sort position may have
    // changed, moving it up or down as needed. No-op if the node is not in
    // the heap.
    const index = node._heapIndex;
    if (index === -1) {
        return;
    }
    if (index === 0) {
        // The root has no parent, so it can only move down.
        heapSiftDown(heap, node, 0);
        return;
    }
    const parent = heap[(index - 1) >>> 1];
    if (compareQueuePriority(parent, node) > 0) {
        // The parent is larger. Sift up.
        heapSiftUp(heap, node, index);
    } else {
        // The parent is smaller (or equal). Sift down.
        heapSiftDown(heap, node, index);
    }
}
function heapSiftUp(heap, node, i) {
    // Bubbles `node` toward the root while its parent compares larger.
    let current = i;
    while (current > 0) {
        const parentIndex = (current - 1) >>> 1;
        const parent = heap[parentIndex];
        if (compareQueuePriority(parent, node) <= 0) {
            // The parent is smaller or equal; the invariant holds. Exit.
            break;
        }
        // The parent is larger. Swap positions and continue from the
        // parent's slot.
        heap[parentIndex] = node;
        node._heapIndex = parentIndex;
        heap[current] = parent;
        parent._heapIndex = current;
        current = parentIndex;
    }
}
function heapSiftDown(heap, node, i) {
    // Pushes `node` toward the leaves while one of its children compares
    // smaller, always swapping with the smaller of the two children.
    let current = i;
    const length = heap.length;
    // Nodes at index >= length/2 are leaves and cannot sift any further.
    const firstLeafIndex = length >>> 1;
    while (current < firstLeafIndex) {
        const leftIndex = current * 2 + 1;
        const rightIndex = leftIndex + 1;
        const left = heap[leftIndex];
        const right = heap[rightIndex];
        if (compareQueuePriority(left, node) < 0) {
            // The left child is smaller. Swap with whichever child is the
            // smaller of the two.
            if (rightIndex < length && compareQueuePriority(right, left) < 0) {
                heap[current] = right;
                right._heapIndex = current;
                heap[rightIndex] = node;
                node._heapIndex = rightIndex;
                current = rightIndex;
            } else {
                heap[current] = left;
                left._heapIndex = current;
                heap[leftIndex] = node;
                node._heapIndex = leftIndex;
                current = leftIndex;
            }
        } else if (rightIndex < length && compareQueuePriority(right, node) < 0) {
            // Only the right child is smaller. Swap with it.
            heap[current] = right;
            right._heapIndex = current;
            heap[rightIndex] = node;
            node._heapIndex = rightIndex;
            current = rightIndex;
        } else {
            // Neither child is smaller. Exit.
            break;
        }
    }
}
// Compiler-generated CommonJS interop shim: when the module's `default`
// export is a function/object that isn't already flagged as an ES module,
// copy all named exports onto it and make it the module export, so both
// `require(...)` and `require(...).default` resolve to the same object.
if ((typeof exports.default === 'function' || (typeof exports.default === 'object' && exports.default !== null)) && typeof exports.default.__esModule === 'undefined') {
  Object.defineProperty(exports.default, '__esModule', { value: true });
  Object.assign(exports.default, exports);
  module.exports = exports.default;
}
//# sourceMappingURL=scheduler.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,19 @@
// Utility type: Prefix<[A, B, C]> matches every prefix of the tuple,
// i.e. [] | [A] | [A, B] | [A, B, C].
export type Prefix<T extends any[]> = T extends [infer First, ...infer Rest] ? [] | [First] | [First, ...Prefix<Rest>] : [];
// A map keyed by tuples, compared per-element. Any prefix of the full
// Keypath is itself a valid key. NOTE: `get` returns null both for a missing
// key and for a key whose stored value is null.
export type TupleMap<Keypath extends Array<any>, V> = {
    set(keys: Prefix<Keypath>, value: V): void;
    get(keys: Prefix<Keypath>): V | null;
    delete(keys: Prefix<Keypath>): void;
};
/**
 * Creates a map whose keys are tuples. Tuples are compared per-element. This
 * is useful when a key has multiple parts, but you don't want to concatenate
 * them into a single string value.
 *
 * In the Segment Cache, we use this to store cache entries by both their href
 * and their Next-URL.
 *
 * Example:
 *   map.set(['https://localhost', 'foo/bar/baz'], 'yay');
 *   map.get(['https://localhost', 'foo/bar/baz']); // returns 'yay'
 */
export declare function createTupleMap<Keypath extends Array<any>, V>(): TupleMap<Keypath, V>;

View File

@@ -0,0 +1,153 @@
// Utility type. Prefix<[A, B, C, D]> matches [A], [A, B], [A, B, C] etc.
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
Object.defineProperty(exports, "createTupleMap", {
enumerable: true,
get: function() {
return createTupleMap;
}
});
function createTupleMap() {
    // The map is a trie: each level corresponds to one element of the key
    // tuple. An entry can both hold a value and have child entries, because
    // any prefix of a keypath is itself a valid keypath.
    //
    // `hasValue` distinguishes "no value stored here" from a stored value
    // that happens to be null — `value` alone cannot make that distinction.
    let rootEntry = {
        parent: null,
        key: null,
        hasValue: false,
        value: null,
        map: null
    };
    // To optimize successive lookups, we cache the last accessed keypath.
    // Although it's not encoded in the type, these are both null or
    // both non-null. It uses object equality, so to take advantage of this
    // optimization, you must pass the same array instance to each successive
    // method call, and you must also not mutate the array between calls.
    let lastAccessedEntry = null;
    let lastAccessedKeys = null;
    // Walks the trie one key element at a time, creating missing entries
    // along the way. Always returns an entry for `keys`.
    function getOrCreateEntry(keys) {
        if (lastAccessedKeys === keys) {
            return lastAccessedEntry;
        }
        // Go through each level of keys until we find the entry that matches,
        // or create a new one if it doesn't already exist.
        let entry = rootEntry;
        for(let i = 0; i < keys.length; i++){
            const key = keys[i];
            let map = entry.map;
            if (map !== null) {
                const existingEntry = map.get(key);
                if (existingEntry !== undefined) {
                    // Found a match. Keep going.
                    entry = existingEntry;
                    continue;
                }
            } else {
                // First child at this level; lazily allocate the child map.
                map = new Map();
                entry.map = map;
            }
            // No entry exists yet at this level. Create a new one.
            const newEntry = {
                parent: entry,
                key,
                value: null,
                hasValue: false,
                map: null
            };
            map.set(key, newEntry);
            entry = newEntry;
        }
        lastAccessedKeys = keys;
        lastAccessedEntry = entry;
        return entry;
    }
    // Read-only walk: returns the entry for `keys`, or null if any level of
    // the keypath is missing.
    function getEntryIfExists(keys) {
        if (lastAccessedKeys === keys) {
            return lastAccessedEntry;
        }
        let entry = rootEntry;
        for(let i = 0; i < keys.length; i++){
            const key = keys[i];
            let map = entry.map;
            if (map !== null) {
                const existingEntry = map.get(key);
                if (existingEntry !== undefined) {
                    // Found a match. Keep going.
                    entry = existingEntry;
                    continue;
                }
            }
            // No entry exists at this level.
            return null;
        }
        lastAccessedKeys = keys;
        lastAccessedEntry = entry;
        return entry;
    }
    // Stores `value` at the keypath, overwriting any previous value.
    function set(keys, value) {
        const entry = getOrCreateEntry(keys);
        entry.hasValue = true;
        entry.value = value;
    }
    // Returns the stored value, or null if the keypath has no value.
    function get(keys) {
        const entry = getEntryIfExists(keys);
        if (entry === null || !entry.hasValue) {
            return null;
        }
        return entry.value;
    }
    // Removes the value at the keypath, then garbage collects any trie nodes
    // left with neither a value nor children.
    function deleteEntry(keys) {
        const entry = getEntryIfExists(keys);
        if (entry === null || !entry.hasValue) {
            return;
        }
        // Found a match. Delete it from the cache.
        const deletedEntry = entry;
        deletedEntry.hasValue = false;
        deletedEntry.value = null;
        // Check if we can garbage collect the entry.
        if (deletedEntry.map === null) {
            // Since this entry has no value, and also no child entries, we can
            // garbage collect it. Remove it from its parent, and keep garbage
            // collecting the parents until we reach a non-empty entry.
            // The cached keypath is invalidated because the entry itself is
            // being removed, not just the value it contains.
            lastAccessedEntry = null;
            lastAccessedKeys = null;
            let parent = deletedEntry.parent;
            let key = deletedEntry.key;
            while(parent !== null){
                const parentMap = parent.map;
                if (parentMap !== null) {
                    parentMap.delete(key);
                    if (parentMap.size === 0) {
                        // We just removed the last entry in the parent map.
                        parent.map = null;
                        // Fix: consult the `hasValue` flag rather than
                        // `value === null`, which would wrongly collect an
                        // entry whose stored value is literally null.
                        if (!parent.hasValue) {
                            // The parent node has no child entries, nor does
                            // it have a value of its own. It can be garbage
                            // collected. Keep going.
                            key = parent.key;
                            parent = parent.parent;
                            continue;
                        }
                    }
                }
                break;
            }
        }
    }
    return {
        set,
        get,
        delete: deleteEntry
    };
}
// Compiler-generated CommonJS interop shim: when the module's `default`
// export is a function/object that isn't already flagged as an ES module,
// copy all named exports onto it and make it the module export, so both
// `require(...)` and `require(...).default` resolve to the same object.
if ((typeof exports.default === 'function' || (typeof exports.default === 'object' && exports.default !== null)) && typeof exports.default.__esModule === 'undefined') {
  Object.defineProperty(exports.default, '__esModule', { value: true });
  Object.assign(exports.default, exports);
  module.exports = exports.default;
}
//# sourceMappingURL=tuple-map.js.map

File diff suppressed because one or more lines are too long