@@ -14,13 +14,22 @@ import (
 	"math/bits"
 	"runtime"
 	"sync"
+	"time"
 
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/tools/gopls/internal/lsp/source"
+	"golang.org/x/tools/gopls/internal/span"
 	"golang.org/x/tools/internal/memoize"
 	"golang.org/x/tools/internal/tokeninternal"
 )
 
+// This file contains an implementation of an LRU parse cache that offsets the
+// base token.Pos value of each cached file so that they may be later described
+// by a single dedicated FileSet.
+//
+// This is achieved by tracking a monotonic offset in the token.Pos space, which
+// is incremented before parsing to allow room for the resulting parsed file.
+
 // reservedForParsing defines the room in the token.Pos space reserved for
 // cached parsed files.
 //
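To make the new header comment concrete: because each cached file is parsed at its own reserved base offset, its positions occupy a disjoint token.Pos range, so a single FileSet that registers files at those bases can resolve positions from all of them. A minimal standalone sketch of that property (file names and sizes here are made up, not from the patch):

```go
package main

import (
	"fmt"
	"go/token"
)

func main() {
	// Register two files at non-overlapping base offsets; each file then owns
	// a disjoint range of token.Pos values, so one FileSet can describe both.
	fset := token.NewFileSet()
	f1 := fset.AddFile("a.go", 1, 100)   // Pos values 1..101
	f2 := fset.AddFile("b.go", 200, 100) // Pos values 200..300

	fmt.Println(fset.Position(f1.Pos(10))) // a.go:1:11
	fmt.Println(fset.Position(f2.Pos(10))) // b.go:1:11
}
```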
@@ -58,21 +67,11 @@ func fileSetWithBase(base int) *token.FileSet {
 	return fset
 }
 
-// This file contains an implementation of a bounded-size parse cache, that
-// offsets the base token.Pos value of each cached file so that they may be
-// later described by a single dedicated FileSet.
-//
-// This is achieved by tracking a monotonic offset in the token.Pos space, that
-// is incremented before parsing allow room for the resulting parsed file.
-
-// Keep 200 recently parsed files, based on the following rationale:
-//   - One of the most important benefits of caching is avoiding re-parsing
-//     everything in a package when working on a single file. No packages in
-//     Kubernetes have > 200 files (only one has > 100).
-//   - Experience has shown that ~1000 parsed files can use noticeable space.
-//     200 feels like a sweet spot between limiting cache size and optimizing
-//     cache hits for low-latency operations.
-const parseCacheMaxFiles = 200
+const (
+	// Always keep 100 recent files, independent of their wall-clock age, to
+	// optimize the case where the user resumes editing after a delay.
+	parseCacheMinFiles = 100
+)
 
 // parsePadding is additional padding allocated to allow for increases in
 // length (such as appending missing braces) caused by fixAST.
@@ -89,31 +88,55 @@ const parseCacheMaxFiles = 200
 // This value is mutable for testing, so that we can exercise the slow path.
 var parsePadding = 1000 // mutable for testing
 
-// A parseCache holds a bounded number of recently accessed parsed Go files. As
-// new files are stored, older files may be evicted from the cache.
+// A parseCache holds recently accessed parsed Go files. After new files are
+// stored, older files may be evicted from the cache via garbage collection.
 //
 // The parseCache.parseFiles method exposes a batch API for parsing (and
 // caching) multiple files. This is necessary for type-checking, where files
 // must be parsed in a common fileset.
 type parseCache struct {
+	maxAge time.Duration // maximum age of unused cache entries before they may be evicted
+	done   chan struct{} // closed when GC is stopped
+
 	mu       sync.Mutex
 	m        map[parseKey]*parseCacheEntry
 	lru      queue  // min-atime priority queue of *parseCacheEntry
 	clock    uint64 // clock time, incremented when the cache is updated
 	nextBase int    // base offset for the next parsed file
 }
 
+// newParseCache creates a new parse cache and starts a goroutine to garbage
+// collect entries older than maxAge.
+//
+// Callers must call parseCache.stop when the parse cache is no longer in use.
+func newParseCache(maxAge time.Duration) *parseCache {
+	c := &parseCache{
+		maxAge: maxAge,
+		m:      make(map[parseKey]*parseCacheEntry),
+		done:   make(chan struct{}),
+	}
+	go c.gc()
+	return c
+}
+
+// stop causes the GC goroutine to exit.
+func (c *parseCache) stop() {
+	close(c.done)
+}
+
 // parseKey uniquely identifies a parsed Go file.
 type parseKey struct {
-	file source.FileIdentity
+	uri  span.URI
 	mode parser.Mode
 }
 
 type parseCacheEntry struct {
 	key      parseKey
+	hash     source.Hash
 	promise  *memoize.Promise // memoize.Promise[*source.ParsedGoFile]
-	atime    uint64           // clock time of last access
-	lruIndex int
+	atime    uint64           // clock time of last access, for use in LRU sorting
+	walltime time.Time        // actual time of last access, for use in time-based eviction; too coarse for LRU on some systems
+	lruIndex int              // owned by the queue implementation
 }
 
 // startParse prepares a parsing pass, creating new promises in the cache for
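As a usage illustration of the constructor/stop pair added above: a caller creates the cache with a maximum entry age and must stop it when done so the background GC goroutine exits. Since parseCache is internal to gopls, the sketch below uses a hypothetical stand-alone type that follows the same done-channel shutdown pattern; none of these names are gopls API.

```go
package main

import (
	"fmt"
	"time"
)

// gcWorker is a hypothetical stand-in for parseCache, showing only the
// lifecycle: a constructor that starts a background goroutine, and a stop
// method that closes a done channel to shut it down.
type gcWorker struct {
	maxAge time.Duration
	done   chan struct{}
}

func newGCWorker(maxAge time.Duration) *gcWorker {
	w := &gcWorker{maxAge: maxAge, done: make(chan struct{})}
	go w.gc()
	return w
}

func (w *gcWorker) stop() { close(w.done) }

func (w *gcWorker) gc() {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-w.done:
			return // stop() was called
		case <-ticker.C:
			// In the real cache this is where expired entries are collected.
		}
	}
}

func main() {
	w := newGCWorker(5 * time.Minute)
	defer w.stop() // mirrors the requirement to call parseCache.stop
	fmt.Println("worker running with maxAge", w.maxAge)
}
```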
@@ -131,6 +154,7 @@ func (c *parseCache) startParse(mode parser.Mode, fhs ...source.FileHandle) ([]*
 	//
 	// All entries parsed from a single call get the same access time.
 	c.clock++
+	walltime := time.Now()
 
 	// Read file data and collect cacheable files.
 	var (
@@ -149,15 +173,22 @@ func (c *parseCache) startParse(mode parser.Mode, fhs ...source.FileHandle) ([]*
 		data[i] = content
 
 		key := parseKey{
-			file: fh.FileIdentity(),
+			uri:  fh.URI(),
 			mode: mode,
 		}
 
-		if e, ok := c.m[key]; ok { // cache hit
-			e.atime = c.clock
-			heap.Fix(&c.lru, e.lruIndex)
-			promises[i] = e.promise
-			continue
+		if e, ok := c.m[key]; ok {
+			if e.hash == fh.FileIdentity().Hash { // cache hit
+				e.atime = c.clock
+				e.walltime = walltime
+				heap.Fix(&c.lru, e.lruIndex)
+				promises[i] = e.promise
+				continue
+			} else {
+				// A cache hit, for a different version. Delete it.
+				delete(c.m, e.key)
+				heap.Remove(&c.lru, e.lruIndex)
+			}
 		}
 
 		uri := fh.URI()
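The hunk above changes the cache key from the full file identity to the URI alone, and instead validates the stored content hash on lookup, so an edit to a file is treated as a miss that also evicts the stale entry. A rough standalone sketch of that lookup rule (the types and names here are illustrative, not the gopls API):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

type entry struct {
	hash  [sha256.Size]byte // content hash recorded when the entry was stored
	value string            // stands in for the cached parse result
}

type cache struct {
	m map[string]*entry // keyed by URI only: at most one entry per file
}

// get returns the cached value only if the caller's content hash matches;
// a same-URI, different-content lookup deletes the stale entry and misses.
func (c *cache) get(uri string, hash [sha256.Size]byte) (string, bool) {
	e, ok := c.m[uri]
	if !ok {
		return "", false
	}
	if e.hash != hash {
		delete(c.m, uri)
		return "", false
	}
	return e.value, true
}

func main() {
	v1 := sha256.Sum256([]byte("package a"))
	v2 := sha256.Sum256([]byte("package a // edited"))

	c := &cache{m: map[string]*entry{"file:///a.go": {hash: v1, value: "parsed v1"}}}

	if got, ok := c.get("file:///a.go", v1); ok {
		fmt.Println("hit:", got)
	}
	if _, ok := c.get("file:///a.go", v2); !ok {
		fmt.Println("miss after edit; stale entry evicted:", len(c.m) == 0)
	}
}
```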
@@ -200,21 +231,14 @@ func (c *parseCache) startParse(mode parser.Mode, fhs ...source.FileHandle) ([]*
 		})
 		promises[i] = promise
 
-		var e *parseCacheEntry
-		if len(c.lru) < parseCacheMaxFiles {
-			// add new entry
-			e = new(parseCacheEntry)
-			if c.m == nil {
-				c.m = make(map[parseKey]*parseCacheEntry)
-			}
-		} else {
-			// evict oldest entry
-			e = heap.Pop(&c.lru).(*parseCacheEntry)
-			delete(c.m, e.key)
+		// add new entry; entries are gc'ed asynchronously
+		e := &parseCacheEntry{
+			key:      key,
+			hash:     fh.FileIdentity().Hash,
+			promise:  promise,
+			atime:    c.clock,
+			walltime: walltime,
 		}
-		e.key = key
-		e.promise = promise
-		e.atime = c.clock
 		c.m[e.key] = e
 		heap.Push(&c.lru, e)
 	}
@@ -226,6 +250,38 @@ func (c *parseCache) startParse(mode parser.Mode, fhs ...source.FileHandle) ([]*
 	return promises, firstReadError
 }
 
+func (c *parseCache) gc() {
+	const period = 10 * time.Second // gc period
+	timer := time.NewTicker(period)
+	defer timer.Stop()
+
+	for {
+		select {
+		case <-c.done:
+			return
+		case <-timer.C:
+		}
+
+		c.gcOnce()
+	}
+}
+
+func (c *parseCache) gcOnce() {
+	now := time.Now()
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	for len(c.m) > parseCacheMinFiles {
+		e := heap.Pop(&c.lru).(*parseCacheEntry)
+		if now.Sub(e.walltime) > c.maxAge {
+			delete(c.m, e.key)
+		} else {
+			heap.Push(&c.lru, e)
+			break
+		}
+	}
+}
+
 // allocateSpace reserves the next n bytes of token.Pos space in the
 // cache.
 //
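The eviction policy in gcOnce reads as: repeatedly inspect the least-recently-used entry, and evict it only while the cache still holds more than parseCacheMinFiles entries and the entry's last access is older than maxAge; the first young-enough entry ends the sweep. A hedged standalone sketch of that policy over a slice already ordered oldest-first (the real code gets this ordering from a min-atime heap; the names below are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

type entry struct {
	key   string
	atime time.Time // wall-clock time of last access
}

// sweep applies the same policy as gcOnce: entries must already be ordered
// oldest-first, and at least minFiles entries are always retained.
func sweep(entries []entry, minFiles int, maxAge time.Duration, now time.Time) []entry {
	for len(entries) > minFiles {
		oldest := entries[0]
		if now.Sub(oldest.atime) <= maxAge {
			break // everything remaining was accessed recently enough
		}
		entries = entries[1:] // evict the expired entry
	}
	return entries
}

func main() {
	now := time.Now()
	es := []entry{
		{"old.go", now.Add(-10 * time.Minute)},
		{"older-but-kept.go", now.Add(-8 * time.Minute)},
		{"fresh.go", now.Add(-time.Second)},
	}
	kept := sweep(es, 2, 5*time.Minute, now)
	fmt.Println(len(kept)) // 2: eviction stops once only minFiles remain
}
```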