author | egahlin |
Wed, 02 Oct 2019 19:26:33 +0200 | |
branch | JEP-349-branch |
changeset 58445 | 1893a674db04 |
parent 58197 | 0ef79bd7fb5c |
child 58714 | 737134732b4a |
permissions | -rw-r--r-- |
50113 | 1 |
/* |
57360 | 2 |
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. |
50113 | 3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. Oracle designates this |
|
8 |
* particular file as subject to the "Classpath" exception as provided |
|
9 |
* by Oracle in the LICENSE file that accompanied this code. |
|
10 |
* |
|
11 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
12 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
13 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
14 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
15 |
* accompanied this code). |
|
16 |
* |
|
17 |
* You should have received a copy of the GNU General Public License version |
|
18 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
19 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
20 |
* |
|
21 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
22 |
* or visit www.oracle.com if you need additional information or have any |
|
23 |
* questions. |
|
24 |
*/ |
|
25 |
||
58145
bc54ed8d908a
Move implementation into jdk.jfr.internal.consumer
egahlin
parents:
58129
diff
changeset
|
26 |
package jdk.jfr.internal.consumer; |
50113 | 27 |
|
28 |
import java.io.IOException; |
|
29 |
import java.util.Collection; |
|
30 |
import java.util.List; |
|
57360 | 31 |
import java.util.StringJoiner; |
50113 | 32 |
|
33 |
import jdk.jfr.EventType; |
|
58145
bc54ed8d908a
Move implementation into jdk.jfr.internal.consumer
egahlin
parents:
58129
diff
changeset
|
34 |
import jdk.jfr.consumer.RecordedEvent; |
bc54ed8d908a
Move implementation into jdk.jfr.internal.consumer
egahlin
parents:
58129
diff
changeset
|
35 |
import jdk.jfr.consumer.RecordedObject; |
50113 | 36 |
import jdk.jfr.internal.LogLevel; |
37 |
import jdk.jfr.internal.LogTag; |
|
38 |
import jdk.jfr.internal.Logger; |
|
57373 | 39 |
import jdk.jfr.internal.LongMap; |
50113 | 40 |
import jdk.jfr.internal.MetadataDescriptor; |
41 |
import jdk.jfr.internal.Type; |
|
57360 | 42 |
import jdk.jfr.internal.Utils; |
50113 | 43 |
|
44 |
/** |
|
45 |
* Parses a chunk. |
|
46 |
* |
|
47 |
*/ |
|
58145
bc54ed8d908a
Move implementation into jdk.jfr.internal.consumer
egahlin
parents:
58129
diff
changeset
|
48 |
public final class ChunkParser { |
58129 | 49 |
|
50 |
static final class ParserConfiguration { |
|
58197 | 51 |
private final boolean reuse; |
52 |
private final boolean ordered; |
|
53 |
private final ParserFilter eventFilter; |
|
58129 | 54 |
|
55 |
long filterStart; |
|
56 |
long filterEnd; |
|
57 |
||
58197 | 58 |
ParserConfiguration(long filterStart, long filterEnd, boolean reuse, boolean ordered, ParserFilter filter) { |
58129 | 59 |
this.filterStart = filterStart; |
60 |
this.filterEnd = filterEnd; |
|
61 |
this.reuse = reuse; |
|
62 |
this.ordered = ordered; |
|
63 |
this.eventFilter = filter; |
|
64 |
} |
|
65 |
||
66 |
public ParserConfiguration() { |
|
58146 | 67 |
this(0, Long.MAX_VALUE, false, false, ParserFilter.ACCEPT_ALL); |
58129 | 68 |
} |
58197 | 69 |
|
70 |
public boolean isOrdered() { |
|
71 |
return ordered; |
|
72 |
} |
|
58129 | 73 |
} |
74 |
||
58197 | 75 |
private enum CheckPointType { |
76 |
// Checkpoint that finishes a flush segment |
|
77 |
FLUSH(1), |
|
78 |
// Checkpoint contains chunk header information in the first pool |
|
79 |
CHUNK_HEADER(2), |
|
80 |
// Checkpoint contains only statics that will not change from chunk to chunk |
|
81 |
STATICS(4), |
|
82 |
// Checkpoint contains thread related information |
|
83 |
THREAD(8); |
|
84 |
private final int mask; |
|
85 |
private CheckPointType(int mask) { |
|
86 |
this.mask = mask; |
|
87 |
} |
|
88 |
||
89 |
private boolean is(int flags) { |
|
90 |
return (mask & flags) != 0; |
|
91 |
} |
|
92 |
} |
|
57883
90e867ac8c37
Add parser support for checkpoint cetagorization bits
egahlin
parents:
57870
diff
changeset
|
93 |
|
50113 | 94 |
// Type id of the checkpoint (constant pool) event in the JFR binary format.
private static final long CONSTANT_POOL_TYPE_ID = 1;
// Name of the type whose constant pool carries chunk header contents.
private static final String CHUNKHEADER = "jdk.types.ChunkHeader";
private final RecordingInput input;
private final ChunkHeader chunkHeader;
private final MetadataDescriptor metadata;
private final TimeConverter timeConverter;
// Metadata of the previous chunk; null for the first chunk. Used to detect metadata changes.
private final MetadataDescriptor previousMetadata;
// Constant pool lookups; carried over from the previous chunk parser so
// already-resolved constants can be reused across chunks.
private final LongMap<ConstantLookup> constantLookups;

// Rebuilt whenever new metadata is seen; otherwise inherited from the previous chunk.
private LongMap<Type> typeMap;
private LongMap<Parser> parsers;
// Set when streaming reaches the end of the current chunk.
private boolean chunkFinished;

// Runs when a flush-segment checkpoint is parsed; may be null (then checkpoints are skipped).
private Runnable flushOperation;
private ParserConfiguration configuration;
50113 | 109 |
|
58129 | 110 |
/**
 * Creates a parser for the chunk at the current position of the input,
 * using a default configuration (no reuse, unordered, accept all events).
 */
public ChunkParser(RecordingInput input) throws IOException {
    this(input, new ParserConfiguration());
}

/**
 * Creates a parser for the chunk at the current position of the input,
 * using the supplied configuration.
 */
ChunkParser(RecordingInput input, ParserConfiguration pc) throws IOException {
    this(new ChunkHeader(input), null, pc);
}

/**
 * Creates a parser for the chunk following the one parsed by {@code previous},
 * carrying over its metadata and constant pools.
 */
private ChunkParser(ChunkParser previous) throws IOException {
    this(new ChunkHeader(previous.input), previous, new ParserConfiguration());
}
50113 | 121 |
|
58129 | 122 |
// Builds a parser for the chunk described by header. When previous is non-null,
// constant lookups, metadata and configuration are carried over from it.
private ChunkParser(ChunkHeader header, ChunkParser previous, ParserConfiguration pc) throws IOException {
    this.configuration = pc;
    this.input = header.getInput();
    this.chunkHeader = header;
    if (previous == null) {
        this.constantLookups = new LongMap<>();
        this.previousMetadata = null;
    } else {
        this.constantLookups = previous.constantLookups;
        this.previousMetadata = previous.metadata;
        // NOTE(review): overwrites the pc argument whenever a previous parser
        // exists. nextChunkParser passes the same object so it is a no-op
        // there, but other callers' pc is silently ignored — confirm intended.
        this.configuration = previous.configuration;
    }
    this.metadata = header.readMetadata(previousMetadata);
    this.timeConverter = new TimeConverter(chunkHeader, metadata.getGMTOffset());
    // Identity comparison: presumably readMetadata returns the previous
    // descriptor instance unchanged when metadata did not change — TODO confirm.
    if (metadata != previousMetadata) {
        ParserFactory factory = new ParserFactory(metadata, constantLookups, timeConverter);
        parsers = factory.getParsers();
        typeMap = factory.getTypeMap();
        updateConfiguration();
    } else {
        parsers = previous.parsers;
        typeMap = previous.typeMap;
    }
    // Start a fresh pool generation, read all constant pools (0 = no abort
    // position), then run the resolve cycle on the latest pools.
    constantLookups.forEach(c -> c.newPool());
    fillConstantPools(0);
    constantLookups.forEach(c -> c.getLatestPool().setResolving());
    constantLookups.forEach(c -> c.getLatestPool().resolve());
    constantLookups.forEach(c -> c.getLatestPool().setResolved());

    // Position the input at the first event of the chunk.
    input.position(chunkHeader.getEventStart());
}
|
153 |
||
58129 | 154 |
/**
 * Returns a parser for the next chunk in the recording, reusing this
 * parser's configuration, metadata and constant pools where possible.
 */
public ChunkParser nextChunkParser() throws IOException {
    return new ChunkParser(chunkHeader.nextHeader(), this, configuration);
}

// Re-applies the current configuration to all event parsers without
// clearing their cached events.
private void updateConfiguration() {
    updateConfiguration(configuration, false);
}
161 |
||
58197 | 162 |
void updateConfiguration(ParserConfiguration configuration, boolean resetEventCache) { |
58129 | 163 |
this.configuration = configuration; |
164 |
parsers.forEach(p -> { |
|
165 |
if (p instanceof EventParser) { |
|
166 |
EventParser ep = (EventParser) p; |
|
167 |
if (resetEventCache) { |
|
168 |
ep.resetCache(); |
|
169 |
} |
|
170 |
String name = ep.getEventType().getName(); |
|
171 |
ep.setOrdered(configuration.ordered); |
|
172 |
ep.setReuse(configuration.reuse); |
|
173 |
ep.setFilterStart(configuration.filterStart); |
|
174 |
ep.setFilterEnd(configuration.filterEnd); |
|
175 |
long threshold = configuration.eventFilter.getThreshold(name); |
|
176 |
if (threshold >= 0) { |
|
177 |
ep.setEnabled(true); |
|
178 |
ep.setThresholdNanos(threshold); |
|
179 |
} else { |
|
180 |
ep.setEnabled(false); |
|
181 |
ep.setThresholdNanos(Long.MAX_VALUE); |
|
182 |
} |
|
183 |
} |
|
184 |
}); |
|
57360 | 185 |
} |
186 |
||
187 |
/** |
|
188 |
* Reads an event and returns null when segment or chunk ends. |
|
57604
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
189 |
* |
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
190 |
* @param awaitNewEvents wait for new data. |
57360 | 191 |
*/ |
58197 | 192 |
RecordedEvent readStreamingEvent(boolean awaitNewEvents) throws IOException { |
57360 | 193 |
long absoluteChunkEnd = chunkHeader.getEnd(); |
194 |
while (true) { |
|
195 |
RecordedEvent event = readEvent(); |
|
196 |
if (event != null) { |
|
197 |
return event; |
|
198 |
} |
|
199 |
if (!awaitNewEvents) { |
|
200 |
return null; |
|
201 |
} |
|
202 |
long lastValid = absoluteChunkEnd; |
|
203 |
long metadataPoistion = chunkHeader.getMetataPosition(); |
|
204 |
long contantPosition = chunkHeader.getConstantPoolPosition(); |
|
205 |
chunkFinished = awaitUpdatedHeader(absoluteChunkEnd); |
|
206 |
if (chunkFinished) { |
|
207 |
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "At chunk end"); |
|
208 |
return null; |
|
209 |
} |
|
210 |
absoluteChunkEnd = chunkHeader.getEnd(); |
|
211 |
// Read metadata and constant pools for the next segment |
|
212 |
if (chunkHeader.getMetataPosition() != metadataPoistion) { |
|
213 |
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Found new metadata in chunk. Rebuilding types and parsers"); |
|
214 |
MetadataDescriptor metadata = chunkHeader.readMetadata(previousMetadata); |
|
215 |
ParserFactory factory = new ParserFactory(metadata, constantLookups, timeConverter); |
|
216 |
parsers = factory.getParsers(); |
|
217 |
typeMap = factory.getTypeMap(); |
|
58129 | 218 |
updateConfiguration();; |
57360 | 219 |
} |
220 |
if (contantPosition != chunkHeader.getConstantPoolPosition()) { |
|
221 |
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Found new constant pool data. Filling up pools with new values"); |
|
222 |
constantLookups.forEach(c -> c.getLatestPool().setAllResolved(false)); |
|
223 |
fillConstantPools(contantPosition + chunkHeader.getAbsoluteChunkStart()); |
|
224 |
constantLookups.forEach(c -> c.getLatestPool().setResolving()); |
|
225 |
constantLookups.forEach(c -> c.getLatestPool().resolve()); |
|
226 |
constantLookups.forEach(c -> c.getLatestPool().setResolved()); |
|
227 |
} |
|
228 |
input.position(lastValid); |
|
229 |
} |
|
230 |
} |
|
231 |
||
232 |
/** |
|
233 |
* Reads an event and returns null when the chunk ends |
|
234 |
*/ |
|
50113 | 235 |
public RecordedEvent readEvent() throws IOException { |
57360 | 236 |
long absoluteChunkEnd = chunkHeader.getEnd(); |
50113 | 237 |
while (input.position() < absoluteChunkEnd) { |
238 |
long pos = input.position(); |
|
239 |
int size = input.readInt(); |
|
240 |
if (size == 0) { |
|
241 |
throw new IOException("Event can't have zero size"); |
|
242 |
} |
|
243 |
long typeId = input.readLong(); |
|
57861 | 244 |
|
245 |
if (typeId != 0) { // Not metadata event |
|
57360 | 246 |
Parser p = parsers.get(typeId); |
247 |
if (p instanceof EventParser) { |
|
248 |
EventParser ep = (EventParser) p; |
|
249 |
RecordedEvent event = ep.parse(input); |
|
250 |
if (event != null) { |
|
251 |
input.position(pos + size); |
|
252 |
return event; |
|
253 |
} |
|
50113 | 254 |
} |
57861 | 255 |
if (typeId == 1 && flushOperation != null) { // checkpoint event |
256 |
parseCheckpoint(); |
|
257 |
} |
|
50113 | 258 |
} |
259 |
input.position(pos + size); |
|
260 |
} |
|
261 |
return null; |
|
262 |
} |
|
263 |
||
57861 | 264 |
// Skips over a checkpoint event's header fields and, if its flags mark the
// end of a flush segment, runs the registered flush operation. Only called
// from readEvent() when flushOperation is non-null, so run() is safe.
private void parseCheckpoint() throws IOException {
    // Content has been parsed previously. This
    // is to trigger flush
    input.readLong(); // timestamp
    input.readLong(); // duration
    input.readLong(); // delta
    byte typeFlags = input.readByte();
    if (CheckPointType.FLUSH.is(typeFlags)) {
        flushOperation.run();
    }
}
|
275 |
||
57360 | 276 |
private boolean awaitUpdatedHeader(long absoluteChunkEnd) throws IOException { |
57604
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
277 |
if (Logger.shouldLog(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO)) { |
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
278 |
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Waiting for more data (streaming). Read so far: " + chunkHeader.getChunkSize() + " bytes"); |
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
279 |
} |
57360 | 280 |
while (true) { |
281 |
chunkHeader.refresh(); |
|
282 |
if (absoluteChunkEnd != chunkHeader.getEnd()) { |
|
283 |
return false; |
|
284 |
} |
|
285 |
if (chunkHeader.isFinished()) { |
|
286 |
return true; |
|
287 |
} |
|
58445
1893a674db04
Add TestRecusrive and only skip events for DirectoryStream
egahlin
parents:
58197
diff
changeset
|
288 |
Utils.waitFlush(1000); |
57360 | 289 |
} |
290 |
} |
|
291 |
||
292 |
private void fillConstantPools(long abortCP) throws IOException { |
|
293 |
long thisCP = chunkHeader.getConstantPoolPosition() + chunkHeader.getAbsoluteChunkStart(); |
|
294 |
long lastCP = -1; |
|
295 |
long delta = -1; |
|
57604
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
296 |
boolean logTrace = Logger.shouldLog(LogTag.JFR_SYSTEM_PARSER, LogLevel.TRACE); |
57360 | 297 |
while (thisCP != abortCP && delta != 0) { |
298 |
input.position(thisCP); |
|
299 |
lastCP = thisCP; |
|
50113 | 300 |
int size = input.readInt(); // size |
301 |
long typeId = input.readLong(); |
|
302 |
if (typeId != CONSTANT_POOL_TYPE_ID) { |
|
57360 | 303 |
throw new IOException("Expected check point event (id = 1) at position " + lastCP + ", but found type id = " + typeId); |
50113 | 304 |
} |
305 |
input.readLong(); // timestamp |
|
306 |
input.readLong(); // duration |
|
57360 | 307 |
delta = input.readLong(); |
308 |
thisCP += delta; |
|
50113 | 309 |
boolean flush = input.readBoolean(); |
310 |
int poolCount = input.readInt(); |
|
57360 | 311 |
final long logLastCP = lastCP; |
312 |
final long logDelta = delta; |
|
57604
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
313 |
if (logTrace) { |
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
314 |
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.TRACE, () -> { |
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
315 |
return "New constant pool: startPosition=" + logLastCP + ", size=" + size + ", deltaToNext=" + logDelta + ", flush=" + flush + ", poolCount=" + poolCount; |
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
316 |
}); |
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
317 |
} |
50113 | 318 |
for (int i = 0; i < poolCount; i++) { |
319 |
long id = input.readLong(); // type id |
|
57360 | 320 |
ConstantLookup lookup = constantLookups.get(id); |
50113 | 321 |
Type type = typeMap.get(id); |
57360 | 322 |
if (lookup == null) { |
50113 | 323 |
if (type == null) { |
57360 | 324 |
throw new IOException( |
325 |
"Error parsing constant pool type " + getName(id) + " at position " + input.position() + " at check point between [" + lastCP + ", " + lastCP + size + "]"); |
|
50113 | 326 |
} |
57870
00860d9caf4d
New metadata system for oldobjects built on top of simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunkheader contents.
mgronlun
parents:
57861
diff
changeset
|
327 |
if (type.getName() != CHUNKHEADER) { |
00860d9caf4d
New metadata system for oldobjects built on top of simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunkheader contents.
mgronlun
parents:
57861
diff
changeset
|
328 |
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Found constant pool(" + id + ") that is never used"); |
00860d9caf4d
New metadata system for oldobjects built on top of simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunkheader contents.
mgronlun
parents:
57861
diff
changeset
|
329 |
} |
57360 | 330 |
ConstantMap pool = new ConstantMap(ObjectFactory.create(type, timeConverter), type.getName()); |
57870
00860d9caf4d
New metadata system for oldobjects built on top of simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunkheader contents.
mgronlun
parents:
57861
diff
changeset
|
331 |
lookup = new ConstantLookup(pool, type); |
00860d9caf4d
New metadata system for oldobjects built on top of simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunkheader contents.
mgronlun
parents:
57861
diff
changeset
|
332 |
constantLookups.put(type.getId(), lookup); |
50113 | 333 |
} |
57360 | 334 |
Parser parser = parsers.get(id); |
50113 | 335 |
if (parser == null) { |
336 |
throw new IOException("Could not find constant pool type with id = " + id); |
|
337 |
} |
|
338 |
try { |
|
339 |
int count = input.readInt(); |
|
57360 | 340 |
if (count == 0) { |
341 |
throw new InternalError("Pool " + type.getName() + " must contain at least one element "); |
|
342 |
} |
|
57604
838f9a7635b6
Cleaner stream reconfiguration + reduced allocation in JFR framework
egahlin
parents:
57467
diff
changeset
|
343 |
if (logTrace) { |
57360 | 344 |
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.TRACE, "Constant Pool " + i + ": " + type.getName()); |
345 |
} |
|
50113 | 346 |
for (int j = 0; j < count; j++) { |
347 |
long key = input.readLong(); |
|
57870
00860d9caf4d
New metadata system for oldobjects built on top of simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunkheader contents.
mgronlun
parents:
57861
diff
changeset
|
348 |
Object resolved = lookup.getPreviousResolved(key); |
00860d9caf4d
New metadata system for oldobjects built on top of simplified tagging model. Caching and serialization improvements. Flushpoint checkpoint with chunkheader contents.
mgronlun
parents:
57861
diff
changeset
|
349 |
if (resolved == null) { |
57360 | 350 |
Object v = parser.parse(input); |
351 |
logConstant(key, v, false); |
|
352 |
lookup.getLatestPool().put(key, v); |
|
57467 | 353 |
} else { |
354 |
parser.skip(input); |
|
355 |
logConstant(key, resolved, true); |
|
356 |
lookup.getLatestPool().putResolved(key, resolved); |
|
357 |
} |
|
50113 | 358 |
} |
359 |
} catch (Exception e) { |
|
57360 | 360 |
throw new IOException("Error parsing constant pool type " + getName(id) + " at position " + input.position() + " at check point between [" + lastCP + ", " + lastCP + size + "]", |
361 |
e); |
|
50113 | 362 |
} |
363 |
} |
|
57360 | 364 |
if (input.position() != lastCP + size) { |
50113 | 365 |
throw new IOException("Size of check point event doesn't match content"); |
366 |
} |
|
367 |
} |
|
368 |
} |
|
369 |
||
57360 | 370 |
private void logConstant(long key, Object v, boolean preresolved) { |
371 |
if (!Logger.shouldLog(LogTag.JFR_SYSTEM_PARSER, LogLevel.TRACE)) { |
|
372 |
return; |
|
373 |
} |
|
374 |
String valueText; |
|
375 |
if (v.getClass().isArray()) { |
|
376 |
Object[] array = (Object[]) v; |
|
377 |
StringJoiner sj = new StringJoiner(", ", "{", "}"); |
|
378 |
for (int i = 0; i < array.length; i++) { |
|
379 |
sj.add(textify(array[i])); |
|
380 |
} |
|
381 |
valueText = sj.toString(); |
|
382 |
} else { |
|
383 |
valueText = textify(v); |
|
384 |
} |
|
385 |
String suffix = preresolved ? " (presolved)" :""; |
|
386 |
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.TRACE, "Constant: " + key + " = " + valueText + suffix); |
|
387 |
} |
|
388 |
||
389 |
private String textify(Object o) { |
|
390 |
if (o == null) { // should not happen |
|
391 |
return "null"; |
|
392 |
} |
|
393 |
if (o instanceof String) { |
|
394 |
return "\"" + String.valueOf(o) + "\""; |
|
395 |
} |
|
396 |
if (o instanceof RecordedObject) { |
|
397 |
return o.getClass().getName(); |
|
398 |
} |
|
399 |
if (o.getClass().isArray()) { |
|
400 |
Object[] array = (Object[]) o; |
|
401 |
if (array.length > 0) { |
|
402 |
return textify(array[0]) + "[]"; // can it be recursive? |
|
403 |
} |
|
404 |
} |
|
405 |
return String.valueOf(o); |
|
406 |
} |
|
407 |
||
50113 | 408 |
private String getName(long id) { |
409 |
Type type = typeMap.get(id); |
|
52850 | 410 |
return type == null ? ("unknown(" + id + ")") : type.getName(); |
50113 | 411 |
} |
412 |
||
413 |
/**
 * Returns all types described by this chunk's metadata.
 */
public Collection<Type> getTypes() {
    return metadata.getTypes();
}

/**
 * Returns the event types described by this chunk's metadata.
 */
public List<EventType> getEventTypes() {
    return metadata.getEventTypes();
}
|
420 |
||
57360 | 421 |
/**
 * Returns true if this is the last chunk of the recording.
 */
public boolean isLastChunk() throws IOException {
    return chunkHeader.isLastChunk();
}

/**
 * Returns a parser for the chunk following this one, using a default
 * configuration.
 */
ChunkParser newChunkParser() throws IOException {
    return new ChunkParser(this);
}

/**
 * Returns true once the end of the current chunk has been reached while
 * streaming (set by readStreamingEvent).
 */
public boolean isChunkFinished() {
    return chunkFinished;
}
57380
6a7e7743b82f
setOrdered and setReuse implemented for file stream, incl. unit tests
egahlin
parents:
57377
diff
changeset
|
432 |
|
57861 | 433 |
/**
 * Registers the operation to run when a flush-segment checkpoint is parsed.
 * While unset (null), checkpoint events are skipped without side effects.
 */
public void setFlushOperation(Runnable flushOperation) {
    this.flushOperation = flushOperation;
}

/**
 * Returns the duration of this chunk in nanoseconds.
 */
public long getChunkDuration() {
    return chunkHeader.getDurationNanos();
}

/**
 * Returns the start time of this chunk in nanoseconds.
 */
public long getStartNanos() {
    return chunkHeader.getStartNanos();
}
58129 | 444 |
|
50113 | 445 |
} |