1 /* |
|
2 * Copyright © 2007,2008,2009,2010 Red Hat, Inc. |
|
3 * Copyright © 2012 Google, Inc. |
|
4 * |
|
5 * This is part of HarfBuzz, a text shaping library. |
|
6 * |
|
7 * Permission is hereby granted, without written agreement and without |
|
8 * license or royalty fees, to use, copy, modify, and distribute this |
|
9 * software and its documentation for any purpose, provided that the |
|
10 * above copyright notice and the following two paragraphs appear in |
|
11 * all copies of this software. |
|
12 * |
|
13 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR |
|
14 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES |
|
15 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN |
|
16 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH |
|
17 * DAMAGE. |
|
18 * |
|
19 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, |
|
20 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND |
|
21 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS |
|
22 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO |
|
23 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. |
|
24 * |
|
25 * Red Hat Author(s): Behdad Esfahbod |
|
26 * Google Author(s): Behdad Esfahbod |
|
27 */ |
|
28 |
|
29 #ifndef HB_OPEN_TYPE_PRIVATE_HH |
|
30 #define HB_OPEN_TYPE_PRIVATE_HH |
|
31 |
|
32 #include "hb-private.hh" |
|
33 #include "hb-debug.hh" |
|
34 #include "hb-blob-private.hh" |
|
35 #include "hb-face-private.hh" |
|
36 |
|
37 |
|
38 namespace OT { |
|
39 |
|
40 |
|
41 |
|
42 /* |
|
43 * Casts |
|
44 */ |
|
45 |
|
46 /* Cast to struct T, reference to reference */ |
|
/* Reinterpret a reference of any object type as a (const) reference to Type.
 * No bytes are touched; this is purely a type-system cast. */
template<typename Type, typename TObject>
static inline const Type& CastR (const TObject &X)
{
  return reinterpret_cast<const Type &> (X);
}
template<typename Type, typename TObject>
static inline Type& CastR (TObject &X)
{
  return reinterpret_cast<Type &> (X);
}

/* Cast to struct T, pointer to pointer */
template<typename Type, typename TObject>
static inline const Type* CastP (const TObject *X)
{
  return reinterpret_cast<const Type *> (X);
}
template<typename Type, typename TObject>
static inline Type* CastP (TObject *X)
{
  return reinterpret_cast<Type *> (X);
}
|
61 |
|
62 /* StructAtOffset<T>(P,Ofs) returns the struct T& that is placed at memory |
|
63 * location pointed to by P plus Ofs bytes. */ |
|
/* Return a reference to the Type laid out `offset` bytes past P.
 * The arithmetic is done on char* so the offset is in bytes, not elements. */
template<typename Type>
static inline const Type& StructAtOffset (const void *P, unsigned int offset)
{
  const char *base = (const char *) P;
  return * reinterpret_cast<const Type *> (base + offset);
}
template<typename Type>
static inline Type& StructAtOffset (void *P, unsigned int offset)
{
  char *base = (char *) P;
  return * reinterpret_cast<Type *> (base + offset);
}
|
70 |
|
71 /* StructAfter<T>(X) returns the struct T& that is placed after X. |
|
72 * Works with X of variable size also. X must implement get_size() */ |
|
73 template<typename Type, typename TObject> |
|
74 static inline const Type& StructAfter(const TObject &X) |
|
75 { return StructAtOffset<Type>(&X, X.get_size()); } |
|
76 template<typename Type, typename TObject> |
|
77 static inline Type& StructAfter(TObject &X) |
|
78 { return StructAtOffset<Type>(&X, X.get_size()); } |
|
79 |
|
80 |
|
81 |
|
82 /* |
|
83 * Size checking |
|
84 */ |
|
85 |
|
86 /* Check _assertion in a method environment */ |
|
#define _DEFINE_INSTANCE_ASSERTION1(_line, _assertion) \
  inline void _instance_assertion_on_line_##_line (void) const \
  { \
    static_assert ((_assertion), ""); \
    ASSERT_INSTANCE_POD (*this); /* Make sure it's POD. */ \
  }
/* Two-level indirection so __LINE__ is macro-expanded before token
 * pasting, giving every generated assertion method a unique name. */
# define _DEFINE_INSTANCE_ASSERTION0(_line, _assertion) _DEFINE_INSTANCE_ASSERTION1 (_line, _assertion)
# define DEFINE_INSTANCE_ASSERTION(_assertion) _DEFINE_INSTANCE_ASSERTION0 (__LINE__, _assertion)

/* Check that _code compiles in a method environment */
/* The generated method is never called; _code only has to type-check. */
#define _DEFINE_COMPILES_ASSERTION1(_line, _code) \
  inline void _compiles_assertion_on_line_##_line (void) const \
  { _code; }
/* Same __LINE__-expansion dance as above. */
# define _DEFINE_COMPILES_ASSERTION0(_line, _code) _DEFINE_COMPILES_ASSERTION1 (_line, _code)
# define DEFINE_COMPILES_ASSERTION(_code) _DEFINE_COMPILES_ASSERTION0 (__LINE__, _code)
|
102 |
|
103 |
|
/* Fixed-size struct: compile-time size check plus a constant get_size(). */
#define DEFINE_SIZE_STATIC(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size)); \
  static const unsigned int static_size = (size); \
  static const unsigned int min_size = (size); \
  inline unsigned int get_size (void) const { return (size); }

/* Union: assert the named member is exactly `size` bytes.  The
 * `0*sizeof(...static_size)` term contributes nothing at runtime; it only
 * forces _member to expose a static_size, i.e. be a fixed-size type. */
#define DEFINE_SIZE_UNION(size, _member) \
  DEFINE_INSTANCE_ASSERTION (0*sizeof(this->u._member.static_size) + sizeof(this->u._member) == (size)); \
  static const unsigned int min_size = (size)

/* Variable-size struct whose fixed header is at least `size` bytes. */
#define DEFINE_SIZE_MIN(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) >= (size)); \
  static const unsigned int min_size = (size)

/* Struct ending in a trailing array: `size` bytes of header followed by
 * elements.  NOTE(review): the `+ sizeof (array[0])` term assumes the
 * array is declared with a single placeholder element (VAR) — confirm
 * against VAR's definition in hb-private.hh. */
#define DEFINE_SIZE_ARRAY(size, array) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + sizeof (array[0])); \
  DEFINE_COMPILES_ASSERTION ((void) array[0].static_size) \
  static const unsigned int min_size = (size)

/* As DEFINE_SIZE_ARRAY, for structs ending in two trailing arrays. */
#define DEFINE_SIZE_ARRAY2(size, array1, array2) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + sizeof (this->array1[0]) + sizeof (this->array2[0])); \
  DEFINE_COMPILES_ASSERTION ((void) array1[0].static_size; (void) array2[0].static_size) \
  static const unsigned int min_size = (size)
|
127 |
|
128 |
|
129 |
|
130 /* |
|
131 * Dispatch |
|
132 */ |
|
133 |
|
/* Base for dispatch contexts (sanitize, serialize, ...).  Context is the
 * deriving class, Return the result type of a dispatch, and MaxDebugDepth
 * caps the nesting level reported by the debug machinery. */
template <typename Context, typename Return, unsigned int MaxDebugDepth>
struct hb_dispatch_context_t
{
  static const unsigned int max_debug_depth = MaxDebugDepth;
  typedef Return return_t;
  /* Default policy: always willing to dispatch into a subtable.
   * Contexts may shadow this to pre-check the format field first. */
  template <typename T, typename F>
  inline bool may_dispatch (const T *obj, const F *format) { return true; }
  /* Value reported when may_dispatch() refused; defaults to the
   * context's default_return_value(). */
  static return_t no_dispatch_return_value (void) { return Context::default_return_value (); }
};
|
143 |
|
144 |
|
145 /* |
|
146 * Sanitize |
|
147 */ |
|
148 |
|
149 /* This limits sanitizing time on really broken fonts. */ |
|
150 #ifndef HB_SANITIZE_MAX_EDITS |
|
151 #define HB_SANITIZE_MAX_EDITS 32 |
|
152 #endif |
|
153 #ifndef HB_SANITIZE_MAX_OPS_FACTOR |
|
154 #define HB_SANITIZE_MAX_OPS_FACTOR 8 |
|
155 #endif |
|
156 #ifndef HB_SANITIZE_MAX_OPS_MIN |
|
157 #define HB_SANITIZE_MAX_OPS_MIN 16384 |
|
158 #endif |
|
159 |
|
/* Context driving a sanitize pass over a font blob: tracks the data
 * bounds, an operation budget, and edit requests made while fixing up
 * broken offsets. */
struct hb_sanitize_context_t :
       hb_dispatch_context_t<hb_sanitize_context_t, bool, HB_DEBUG_SANITIZE>
{
  inline hb_sanitize_context_t (void) :
        debug_depth (0),
        start (nullptr), end (nullptr),
        writable (false), edit_count (0), max_ops (0),
        blob (nullptr),
        num_glyphs (0) {}

  inline const char *get_name (void) { return "SANITIZE"; }
  /* Only dispatch into a subtable if its format field itself sanitizes. */
  template <typename T, typename F>
  inline bool may_dispatch (const T *obj, const F *format)
  { return format->sanitize (this); }
  template <typename T>
  inline return_t dispatch (const T &obj) { return obj.sanitize (this); }
  static return_t default_return_value (void) { return true; }
  static return_t no_dispatch_return_value (void) { return false; }
  /* Stop iterating sublookups as soon as one fails. */
  bool stop_sublookup_iteration (const return_t r) const { return !r; }

  /* Take a reference on the blob; paired with end_processing(). */
  inline void init (hb_blob_t *b)
  {
    this->blob = hb_blob_reference (b);
    this->writable = false;
  }

  inline void start_processing (void)
  {
    this->start = hb_blob_get_data (this->blob, nullptr);
    this->end = this->start + this->blob->length;
    assert (this->start <= this->end); /* Must not overflow. */
    /* Budget range checks proportionally to blob size so broken or
     * maliciously nested fonts cannot make sanitizing take forever. */
    this->max_ops = MAX ((unsigned int) (this->end - this->start) * HB_SANITIZE_MAX_OPS_FACTOR,
                         (unsigned) HB_SANITIZE_MAX_OPS_MIN);
    this->edit_count = 0;
    this->debug_depth = 0;

    DEBUG_MSG_LEVEL (SANITIZE, start, 0, +1,
                     "start [%p..%p] (%lu bytes)",
                     this->start, this->end,
                     (unsigned long) (this->end - this->start));
  }

  /* Drop the blob reference and reset the data bounds. */
  inline void end_processing (void)
  {
    DEBUG_MSG_LEVEL (SANITIZE, this->start, 0, -1,
                     "end [%p..%p] %u edit requests",
                     this->start, this->end, this->edit_count);

    hb_blob_destroy (this->blob);
    this->blob = nullptr;
    this->start = this->end = nullptr;
  }

  /* True iff [base, base+len) lies within the blob.  Each call also
   * decrements the op budget; once exhausted, every check fails, which
   * aborts the pass. */
  inline bool check_range (const void *base, unsigned int len) const
  {
    const char *p = (const char *) base;
    bool ok = this->max_ops-- > 0 &&
              this->start <= p &&
              p <= this->end &&
              (unsigned int) (this->end - p) >= len;

    DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
                     "check_range [%p..%p] (%d bytes) in [%p..%p] -> %s",
                     p, p + len, len,
                     this->start, this->end,
                     ok ? "OK" : "OUT-OF-RANGE");

    return likely (ok);
  }

  /* check_range for `len` records of `record_size` bytes each, guarding
   * the size multiplication against unsigned overflow first. */
  inline bool check_array (const void *base, unsigned int record_size, unsigned int len) const
  {
    const char *p = (const char *) base;
    bool overflows = _hb_unsigned_int_mul_overflows (len, record_size);
    unsigned int array_size = record_size * len;
    bool ok = !overflows && this->check_range (base, array_size);

    DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
                     "check_array [%p..%p] (%d*%d=%d bytes) in [%p..%p] -> %s",
                     p, p + (record_size * len), record_size, len, (unsigned int) array_size,
                     this->start, this->end,
                     overflows ? "OVERFLOWS" : ok ? "OK" : "OUT-OF-RANGE");

    return likely (ok);
  }

  /* Bounds-check a struct's fixed-size header (its min_size). */
  template <typename Type>
  inline bool check_struct (const Type *obj) const
  {
    return likely (this->check_range (obj, obj->min_size));
  }

  /* Ask permission to edit [base, base+len).  Always counts the request
   * (capped at HB_SANITIZE_MAX_EDITS); grants only on writable blobs. */
  inline bool may_edit (const void *base, unsigned int len)
  {
    if (this->edit_count >= HB_SANITIZE_MAX_EDITS)
      return false;

    const char *p = (const char *) base;
    this->edit_count++;

    DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
                     "may_edit(%u) [%p..%p] (%d bytes) in [%p..%p] -> %s",
                     this->edit_count,
                     p, p + len, len,
                     this->start, this->end,
                     this->writable ? "GRANTED" : "DENIED");

    return this->writable;
  }

  /* Write v into *obj if editing is permitted; used e.g. to neuter a
   * broken offset to zero.  Returns whether the write happened. */
  template <typename Type, typename ValueType>
  inline bool try_set (const Type *obj, const ValueType &v) {
    if (this->may_edit (obj, obj->static_size)) {
      const_cast<Type *> (obj)->set (v);
      return true;
    }
    return false;
  }

  mutable unsigned int debug_depth;  /* Current nesting level for debug output. */
  const char *start, *end;           /* Blob data bounds; end is one-past-last. */
  bool writable;                     /* Whether edits may be applied in place. */
  unsigned int edit_count;           /* Edit requests made in the current pass. */
  mutable int max_ops;               /* Remaining check budget; may go negative. */
  hb_blob_t *blob;                   /* Referenced blob under inspection. */
  unsigned int num_glyphs;           /* Glyph count, for tables that need it. */
};
|
287 |
|
288 |
|
289 |
|
290 /* Template to sanitize an object. */ |
|
291 template <typename Type> |
|
292 struct Sanitizer |
|
293 { |
|
294 inline Sanitizer (void) {} |
|
295 |
|
296 inline hb_blob_t *sanitize (hb_blob_t *blob) { |
|
297 bool sane; |
|
298 |
|
299 /* TODO is_sane() stuff */ |
|
300 |
|
301 c->init (blob); |
|
302 |
|
303 retry: |
|
304 DEBUG_MSG_FUNC (SANITIZE, c->start, "start"); |
|
305 |
|
306 c->start_processing (); |
|
307 |
|
308 if (unlikely (!c->start)) { |
|
309 c->end_processing (); |
|
310 return blob; |
|
311 } |
|
312 |
|
313 Type *t = CastP<Type> (const_cast<char *> (c->start)); |
|
314 |
|
315 sane = t->sanitize (c); |
|
316 if (sane) { |
|
317 if (c->edit_count) { |
|
318 DEBUG_MSG_FUNC (SANITIZE, c->start, "passed first round with %d edits; going for second round", c->edit_count); |
|
319 |
|
320 /* sanitize again to ensure no toe-stepping */ |
|
321 c->edit_count = 0; |
|
322 sane = t->sanitize (c); |
|
323 if (c->edit_count) { |
|
324 DEBUG_MSG_FUNC (SANITIZE, c->start, "requested %d edits in second round; FAILLING", c->edit_count); |
|
325 sane = false; |
|
326 } |
|
327 } |
|
328 } else { |
|
329 unsigned int edit_count = c->edit_count; |
|
330 if (edit_count && !c->writable) { |
|
331 c->start = hb_blob_get_data_writable (blob, nullptr); |
|
332 c->end = c->start + blob->length; |
|
333 |
|
334 if (c->start) { |
|
335 c->writable = true; |
|
336 /* ok, we made it writable by relocating. try again */ |
|
337 DEBUG_MSG_FUNC (SANITIZE, c->start, "retry"); |
|
338 goto retry; |
|
339 } |
|
340 } |
|
341 } |
|
342 |
|
343 c->end_processing (); |
|
344 |
|
345 DEBUG_MSG_FUNC (SANITIZE, c->start, sane ? "PASSED" : "FAILED"); |
|
346 if (sane) |
|
347 { |
|
348 blob->lock (); |
|
349 return blob; |
|
350 } |
|
351 else |
|
352 { |
|
353 hb_blob_destroy (blob); |
|
354 return hb_blob_get_empty (); |
|
355 } |
|
356 } |
|
357 |
|
358 inline void set_num_glyphs (unsigned int num_glyphs) { c->num_glyphs = num_glyphs; } |
|
359 |
|
360 private: |
|
361 hb_sanitize_context_t c[1]; |
|
362 }; |
|
363 |
|
364 |
|
365 |
|
366 /* |
|
367 * Serialize |
|
368 */ |
|
369 |
|
370 |
|
371 struct hb_serialize_context_t |
|
372 { |
|
373 inline hb_serialize_context_t (void *start_, unsigned int size) |
|
374 { |
|
375 this->start = (char *) start_; |
|
376 this->end = this->start + size; |
|
377 |
|
378 this->ran_out_of_room = false; |
|
379 this->head = this->start; |
|
380 this->debug_depth = 0; |
|
381 } |
|
382 |
|
383 template <typename Type> |
|
384 inline Type *start_serialize (void) |
|
385 { |
|
386 DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1, |
|
387 "start [%p..%p] (%lu bytes)", |
|
388 this->start, this->end, |
|
389 (unsigned long) (this->end - this->start)); |
|
390 |
|
391 return start_embed<Type> (); |
|
392 } |
|
393 |
|
394 inline void end_serialize (void) |
|
395 { |
|
396 DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1, |
|
397 "end [%p..%p] serialized %d bytes; %s", |
|
398 this->start, this->end, |
|
399 (int) (this->head - this->start), |
|
400 this->ran_out_of_room ? "RAN OUT OF ROOM" : "did not ran out of room"); |
|
401 |
|
402 } |
|
403 |
|
404 template <typename Type> |
|
405 inline Type *copy (void) |
|
406 { |
|
407 assert (!this->ran_out_of_room); |
|
408 unsigned int len = this->head - this->start; |
|
409 void *p = malloc (len); |
|
410 if (p) |
|
411 memcpy (p, this->start, len); |
|
412 return reinterpret_cast<Type *> (p); |
|
413 } |
|
414 |
|
415 template <typename Type> |
|
416 inline Type *allocate_size (unsigned int size) |
|
417 { |
|
418 if (unlikely (this->ran_out_of_room || this->end - this->head < ptrdiff_t (size))) { |
|
419 this->ran_out_of_room = true; |
|
420 return nullptr; |
|
421 } |
|
422 memset (this->head, 0, size); |
|
423 char *ret = this->head; |
|
424 this->head += size; |
|
425 return reinterpret_cast<Type *> (ret); |
|
426 } |
|
427 |
|
428 template <typename Type> |
|
429 inline Type *allocate_min (void) |
|
430 { |
|
431 return this->allocate_size<Type> (Type::min_size); |
|
432 } |
|
433 |
|
434 template <typename Type> |
|
435 inline Type *start_embed (void) |
|
436 { |
|
437 Type *ret = reinterpret_cast<Type *> (this->head); |
|
438 return ret; |
|
439 } |
|
440 |
|
441 template <typename Type> |
|
442 inline Type *embed (const Type &obj) |
|
443 { |
|
444 unsigned int size = obj.get_size (); |
|
445 Type *ret = this->allocate_size<Type> (size); |
|
446 if (unlikely (!ret)) return nullptr; |
|
447 memcpy (ret, obj, size); |
|
448 return ret; |
|
449 } |
|
450 |
|
451 template <typename Type> |
|
452 inline Type *extend_min (Type &obj) |
|
453 { |
|
454 unsigned int size = obj.min_size; |
|
455 assert (this->start <= (char *) &obj && (char *) &obj <= this->head && (char *) &obj + size >= this->head); |
|
456 if (unlikely (!this->allocate_size<Type> (((char *) &obj) + size - this->head))) return nullptr; |
|
457 return reinterpret_cast<Type *> (&obj); |
|
458 } |
|
459 |
|
460 template <typename Type> |
|
461 inline Type *extend (Type &obj) |
|
462 { |
|
463 unsigned int size = obj.get_size (); |
|
464 assert (this->start < (char *) &obj && (char *) &obj <= this->head && (char *) &obj + size >= this->head); |
|
465 if (unlikely (!this->allocate_size<Type> (((char *) &obj) + size - this->head))) return nullptr; |
|
466 return reinterpret_cast<Type *> (&obj); |
|
467 } |
|
468 |
|
469 inline void truncate (void *new_head) |
|
470 { |
|
471 assert (this->start < new_head && new_head <= this->head); |
|
472 this->head = (char *) new_head; |
|
473 } |
|
474 |
|
475 unsigned int debug_depth; |
|
476 char *start, *end, *head; |
|
477 bool ran_out_of_room; |
|
478 }; |
|
479 |
|
480 template <typename Type> |
|
481 struct Supplier |
|
482 { |
|
483 inline Supplier (const Type *array, unsigned int len_, unsigned int stride_=sizeof(Type)) |
|
484 { |
|
485 head = array; |
|
486 len = len_; |
|
487 stride = stride_; |
|
488 } |
|
489 inline const Type operator [] (unsigned int i) const |
|
490 { |
|
491 if (unlikely (i >= len)) return Type (); |
|
492 return * (const Type *) (const void *) ((const char *) head + stride * i); |
|
493 } |
|
494 |
|
495 inline Supplier<Type> & operator += (unsigned int count) |
|
496 { |
|
497 if (unlikely (count > len)) |
|
498 count = len; |
|
499 len -= count; |
|
500 head = (const Type *) (const void *) ((const char *) head + stride * count); |
|
501 return *this; |
|
502 } |
|
503 |
|
504 private: |
|
505 inline Supplier (const Supplier<Type> &); /* Disallow copy */ |
|
506 inline Supplier<Type>& operator= (const Supplier<Type> &); /* Disallow copy */ |
|
507 |
|
508 unsigned int len; |
|
509 unsigned int stride; |
|
510 const Type *head; |
|
511 }; |
|
512 |
|
513 |
|
514 /* |
|
515 * |
|
516 * The OpenType Font File: Data Types |
|
517 */ |
|
518 |
|
519 |
|
520 /* "The following data types are used in the OpenType font file. |
|
521 * All OpenType fonts use Motorola-style byte ordering (Big Endian):" */ |
|
522 |
|
523 /* |
|
524 * Int types |
|
525 */ |
|
526 |
|
527 |
|
/* Big-endian integer of Bytes bytes, exposed as Type.  Stored as raw
 * uint8_t bytes, so instances have no alignment requirement. */
template <typename Type, int Bytes> struct BEInt;

template <typename Type>
struct BEInt<Type, 1>
{
  public:
  inline void set (Type V)
  {
    v = V;
  }
  inline operator Type (void) const
  {
    return v;
  }
  private: uint8_t v;
};
template <typename Type>
struct BEInt<Type, 2>
{
  public:
  inline void set (Type V)
  {
    v[0] = (V >>  8) & 0xFF;
    v[1] = (V      ) & 0xFF;
  }
  inline operator Type (void) const
  {
    return (v[0] <<  8)
         + (v[1]      );
  }
  private: uint8_t v[2];
};
template <typename Type>
struct BEInt<Type, 3>
{
  public:
  inline void set (Type V)
  {
    v[0] = (V >> 16) & 0xFF;
    v[1] = (V >>  8) & 0xFF;
    v[2] = (V      ) & 0xFF;
  }
  inline operator Type (void) const
  {
    return (v[0] << 16)
         + (v[1] <<  8)
         + (v[2]      );
  }
  private: uint8_t v[3];
};
template <typename Type>
struct BEInt<Type, 4>
{
  public:
  inline void set (Type V)
  {
    v[0] = (V >> 24) & 0xFF;
    v[1] = (V >> 16) & 0xFF;
    v[2] = (V >>  8) & 0xFF;
    v[3] = (V      ) & 0xFF;
  }
  inline operator Type (void) const
  {
    /* Accumulate in uint32_t: promoting v[0] to (signed) int and
     * left-shifting by 24 is undefined behavior for bytes >= 0x80. */
    return (Type) (((uint32_t) v[0] << 24)
                 + ((uint32_t) v[1] << 16)
                 + ((uint32_t) v[2] <<  8)
                 +  ((uint32_t) v[3]      ));
  }
  private: uint8_t v[4];
};
|
598 |
|
599 /* Integer types in big-endian order and no alignment requirement */ |
|
/* Wrapper giving BEInt value semantics, comparison and sanitizing. */
template <typename Type, unsigned int Size>
struct IntType
{
  inline void set (Type i) { v.set (i); }
  inline operator Type(void) const { return v; }
  inline bool operator == (const IntType<Type,Size> &o) const { return (Type) v == (Type) o.v; }
  inline bool operator != (const IntType<Type,Size> &o) const { return !(*this == o); }
  /* qsort()-style comparator.  Note the swap: b->cmp(*a) yields the sign
   * of (*a - *b), i.e. conventional ascending order for (a, b). */
  static inline int cmp (const IntType<Type,Size> *a, const IntType<Type,Size> *b) { return b->cmp (*a); }
  template <typename Type2>
  inline int cmp (Type2 a) const
  {
    Type b = v;
    /* When both operands are narrower than int, plain subtraction cannot
     * overflow and is cheaper than the three-way compare below. */
    if (sizeof (Type) < sizeof (int) && sizeof (Type2) < sizeof (int))
      return (int) a - (int) b;
    else
      return a < b ? -1 : a == b ? 0 : +1;
  }
  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (likely (c->check_struct (this)));
  }
  protected:
  BEInt<Type, Size> v;
  public:
  DEFINE_SIZE_STATIC (Size);
};
|
627 |
|
typedef IntType<uint8_t,  1> HBUINT8;	/* 8-bit unsigned integer. */
typedef IntType<int8_t,   1> HBINT8;	/* 8-bit signed integer. */
typedef IntType<uint16_t, 2> HBUINT16;	/* 16-bit unsigned integer. */
typedef IntType<int16_t,  2> HBINT16;	/* 16-bit signed integer. */
typedef IntType<uint32_t, 4> HBUINT32;	/* 32-bit unsigned integer. */
typedef IntType<int32_t,  4> HBINT32;	/* 32-bit signed integer. */
/* Stored as 3 big-endian bytes on disk, but exposed as a uint32_t value. */
typedef IntType<uint32_t, 3> HBUINT24;	/* 24-bit unsigned integer. */

/* 16-bit signed integer (HBINT16) that describes a quantity in FUnits. */
typedef HBINT16 FWORD;

/* 16-bit unsigned integer (HBUINT16) that describes a quantity in FUnits. */
typedef HBUINT16 UFWORD;
|
641 |
|
642 /* 16-bit signed fixed number with the low 14 bits of fraction (2.14). */ |
|
struct F2DOT14 : HBINT16
{
  // 16384 means 1<<14
  /* Raw 2.14 fixed-point to float. */
  inline float to_float (void) const { return ((int32_t) v) / 16384.f; }
  /* Round-to-nearest float to 2.14; the caller is responsible for keeping
   * f within the representable range (presumably [-2, 2) — values outside
   * it overflow the int16 storage; TODO confirm at call sites). */
  inline void set_float (float f) { v.set (round (f * 16384.f)); }
  public:
  DEFINE_SIZE_STATIC (2);
};
|
651 |
|
652 /* 32-bit signed fixed-point number (16.16). */ |
|
struct Fixed : HBINT32
{
  // 65536 means 1<<16
  /* Raw 16.16 fixed-point to float. */
  inline float to_float (void) const { return ((int32_t) v) / 65536.f; }
  /* Round-to-nearest float to 16.16; overflow handling is the caller's
   * responsibility. */
  inline void set_float (float f) { v.set (round (f * 65536.f)); }
  public:
  DEFINE_SIZE_STATIC (4);
};
|
661 |
|
662 /* Date represented in number of seconds since 12:00 midnight, January 1, |
|
663 * 1904. The value is represented as a signed 64-bit integer. */ |
|
struct LONGDATETIME
{
  /* Only a bounds check; the value itself is never interpreted here. */
  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (likely (c->check_struct (this)));
  }
  protected:
  /* The 64-bit value is stored as two big-endian 32-bit halves,
   * most-significant half first. */
  HBINT32 major;
  HBUINT32 minor;
  public:
  DEFINE_SIZE_STATIC (8);
};
|
677 |
|
678 /* Array of four uint8s (length = 32 bits) used to identify a script, language |
|
679 * system, feature, or baseline */ |
|
struct Tag : HBUINT32
{
  /* What the char* converters return is NOT nul-terminated.  Print using "%.4s" */
  /* Big-endian storage means &v points at the four tag characters in
   * their spelled order. */
  inline operator const char* (void) const { return reinterpret_cast<const char *> (&this->v); }
  inline operator char* (void) { return reinterpret_cast<char *> (&this->v); }
  public:
  DEFINE_SIZE_STATIC (4);
};
|
688 DEFINE_NULL_DATA (OT, Tag, " "); |
|
689 |
|
/* Glyph index number, same as uint16 (length = 16 bits) */
typedef HBUINT16 GlyphID;

/* Name-table index, same as uint16 (length = 16 bits) */
typedef HBUINT16 NameID;

/* Script/language-system/feature index */
struct Index : HBUINT16 {
  /* Sentinel value returned by lookups that find nothing. */
  static const unsigned int NOT_FOUND_INDEX = 0xFFFFu;
};
/* The null Index is all-ones, i.e. NOT_FOUND_INDEX. */
DEFINE_NULL_DATA (OT, Index, "\xff\xff");
|
701 |
|
702 /* Offset, Null offset = 0 */ |
|
template <typename Type>
struct Offset : Type
{
  /* A zero offset is the conventional "no data here" value. */
  inline bool is_null (void) const { return 0 == *this; }

  /* Record the byte distance from `base` to the serializer's current
   * head, and return that head — the spot where the pointee is to be
   * written next. */
  inline void *serialize (hb_serialize_context_t *c, const void *base)
  {
    void *t = c->start_embed<void> ();
    this->set ((char *) t - (char *) base); /* TODO(serialize) Overflow? */
    return t;
  }

  public:
  DEFINE_SIZE_STATIC (sizeof(Type));
};

typedef Offset<HBUINT16> Offset16;
typedef Offset<HBUINT32> Offset32;
|
721 |
|
722 |
|
723 /* CheckSum */ |
|
724 struct CheckSum : HBUINT32 |
|
725 { |
|
726 /* This is reference implementation from the spec. */ |
|
727 static inline uint32_t CalcTableChecksum (const HBUINT32 *Table, uint32_t Length) |
|
728 { |
|
729 uint32_t Sum = 0L; |
|
730 assert (0 == (Length & 3)); |
|
731 const HBUINT32 *EndPtr = Table + Length / HBUINT32::static_size; |
|
732 |
|
733 while (Table < EndPtr) |
|
734 Sum += *Table++; |
|
735 return Sum; |
|
736 } |
|
737 |
|
738 /* Note: data should be 4byte aligned and have 4byte padding at the end. */ |
|
739 inline void set_for_data (const void *data, unsigned int length) |
|
740 { set (CalcTableChecksum ((const HBUINT32 *) data, length)); } |
|
741 |
|
742 public: |
|
743 DEFINE_SIZE_STATIC (4); |
|
744 }; |
|
745 |
|
746 |
|
747 /* |
|
748 * Version Numbers |
|
749 */ |
|
750 |
|
template <typename FixedType=HBUINT16>
struct FixedVersion
{
  /* Pack major.minor into one integer, major in the high half. */
  inline uint32_t to_int (void) const { return (major << (sizeof(FixedType) * 8)) + minor; }

  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this));
  }

  FixedType major;
  FixedType minor;
  public:
  DEFINE_SIZE_STATIC (2 * sizeof(FixedType));
};
|
767 |
|
768 |
|
769 |
|
770 /* |
|
771 * Template subclasses of Offset that do the dereferencing. |
|
772 * Use: (base+offset) |
|
773 */ |
|
774 |
|
/* Offset that knows how to dereference itself relative to a base. */
template <typename Type, typename OffsetType=HBUINT16>
struct OffsetTo : Offset<OffsetType>
{
  /* Dereference against `base`; a zero offset yields the Null object. */
  inline const Type& operator () (const void *base) const
  {
    unsigned int offset = *this;
    if (unlikely (!offset)) return Null(Type);
    return StructAtOffset<const Type> (base, offset);
  }
  /* Mutable flavor; a zero offset yields the writable Crap object. */
  inline Type& operator () (void *base) const
  {
    unsigned int offset = *this;
    if (unlikely (!offset)) return Crap(Type);
    return StructAtOffset<Type> (base, offset);
  }

  /* Record the offset to the serializer's head and return the pointee
   * location there for the caller to fill in. */
  inline Type& serialize (hb_serialize_context_t *c, const void *base)
  {
    return * (Type *) Offset<OffsetType>::serialize (c, base);
  }

  /* Sanitize this offset and, recursively, the object it points to.
   * If the pointee fails to sanitize, try to neuter the offset (set it
   * to zero) instead of failing the whole parent. */
  inline bool sanitize (hb_sanitize_context_t *c, const void *base) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!c->check_struct (this))) return_trace (false);
    unsigned int offset = *this;
    if (unlikely (!offset)) return_trace (true);
    if (unlikely (!c->check_range (base, offset))) return_trace (false);
    const Type &obj = StructAtOffset<Type> (base, offset);
    return_trace (likely (obj.sanitize (c)) || neuter (c));
  }
  /* As above, forwarding extra user_data to the pointee's sanitize(). */
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, const void *base, T user_data) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!c->check_struct (this))) return_trace (false);
    unsigned int offset = *this;
    if (unlikely (!offset)) return_trace (true);
    if (unlikely (!c->check_range (base, offset))) return_trace (false);
    const Type &obj = StructAtOffset<Type> (base, offset);
    return_trace (likely (obj.sanitize (c, user_data)) || neuter (c));
  }

  /* Set the offset to Null */
  inline bool neuter (hb_sanitize_context_t *c) const {
    return c->try_set (this, 0);
  }
  DEFINE_SIZE_STATIC (sizeof(OffsetType));
};
|
/* 32-bit-offset flavor of OffsetTo. */
template <typename Type> struct LOffsetTo : OffsetTo<Type, HBUINT32> {};
/* Syntactic sugar: `base+offset` dereferences an OffsetTo relative to base. */
template <typename Base, typename OffsetType, typename Type>
static inline const Type& operator + (const Base &base, const OffsetTo<Type, OffsetType> &offset) { return offset (base); }
template <typename Base, typename OffsetType, typename Type>
static inline Type& operator + (Base &base, OffsetTo<Type, OffsetType> &offset) { return offset (base); }
|
829 |
|
830 |
|
831 /* |
|
832 * Array Types |
|
833 */ |
|
834 |
|
835 |
|
836 /* TODO Use it in ArrayOf, HeadlessArrayOf, and other places around the code base?? */ |
|
/* Array whose element count is stored elsewhere; the count is always
 * supplied by the caller. */
template <typename Type>
struct UnsizedArrayOf
{
  /* Unchecked element access; bounds are the caller's responsibility. */
  inline const Type& operator [] (unsigned int i) const { return arrayZ[i]; }
  inline Type& operator [] (unsigned int i) { return arrayZ[i]; }

  /* Sanitize `count` elements of a type that references nothing else. */
  inline bool sanitize (hb_sanitize_context_t *c, unsigned int count) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c, count))) return_trace (false);

    /* Note: for structs that do not reference other structs,
     * we do not need to call their sanitize() as we already did
     * a bound check on the aggregate array size.  We just include
     * a small unreachable expression to make sure the structs
     * pointed to do have a simple sanitize(), ie. they do not
     * reference other structs via offsets.
     */
    (void) (false && arrayZ[0].sanitize (c));

    return_trace (true);
  }
  /* Sanitize elements that reference further data relative to `base`. */
  inline bool sanitize (hb_sanitize_context_t *c, unsigned int count, const void *base) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c, count))) return_trace (false);
    for (unsigned int i = 0; i < count; i++)
      if (unlikely (!arrayZ[i].sanitize (c, base)))
        return_trace (false);
    return_trace (true);
  }
  /* As above, forwarding extra user_data to each element's sanitize(). */
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, unsigned int count, const void *base, T user_data) const
  {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c, count))) return_trace (false);
    for (unsigned int i = 0; i < count; i++)
      if (unlikely (!arrayZ[i].sanitize (c, base, user_data)))
        return_trace (false);
    return_trace (true);
  }

  /* Bounds-check the aggregate array storage only, no recursion. */
  inline bool sanitize_shallow (hb_sanitize_context_t *c, unsigned int count) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_array (arrayZ, arrayZ[0].static_size, count));
  }

  public:
  Type arrayZ[VAR];
  public:
  DEFINE_SIZE_ARRAY (0, arrayZ);
};
|
890 |
|
891 /* Unsized array of offset's */ |
|
template <typename Type, typename OffsetType>
struct UnsizedOffsetArrayOf : UnsizedArrayOf<OffsetTo<Type, OffsetType> > {};

/* Unsized array of offsets relative to the beginning of the array itself. */
template <typename Type, typename OffsetType>
struct UnsizedOffsetListOf : UnsizedOffsetArrayOf<Type, OffsetType>
{
  /* Dereference: offsets are relative to the array itself (`this`). */
  inline const Type& operator [] (unsigned int i) const
  {
    return this+this->arrayZ[i];
  }

  /* Sanitize `count` offsets and their pointees, with `this` as base. */
  inline bool sanitize (hb_sanitize_context_t *c, unsigned int count) const
  {
    TRACE_SANITIZE (this);
    return_trace ((UnsizedOffsetArrayOf<Type, OffsetType>::sanitize (c, count, this)));
  }
  /* As above, forwarding extra user_data to the pointee sanitizers. */
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, unsigned int count, T user_data) const
  {
    TRACE_SANITIZE (this);
    return_trace ((UnsizedOffsetArrayOf<Type, OffsetType>::sanitize (c, count, this, user_data)));
  }
};
|
916 |
|
917 |
|
918 /* An array with a number of elements. */ |
|
919 template <typename Type, typename LenType=HBUINT16> |
|
920 struct ArrayOf |
|
921 { |
|
922 const Type *sub_array (unsigned int start_offset, unsigned int *pcount /* IN/OUT */) const |
|
923 { |
|
924 unsigned int count = len; |
|
925 if (unlikely (start_offset > count)) |
|
926 count = 0; |
|
927 else |
|
928 count -= start_offset; |
|
929 count = MIN (count, *pcount); |
|
930 *pcount = count; |
|
931 return arrayZ + start_offset; |
|
932 } |
|
933 |
|
934 inline const Type& operator [] (unsigned int i) const |
|
935 { |
|
936 if (unlikely (i >= len)) return Null(Type); |
|
937 return arrayZ[i]; |
|
938 } |
|
939 inline Type& operator [] (unsigned int i) |
|
940 { |
|
941 if (unlikely (i >= len)) return Crap(Type); |
|
942 return arrayZ[i]; |
|
943 } |
|
944 inline unsigned int get_size (void) const |
|
945 { return len.static_size + len * Type::static_size; } |
|
946 |
|
947 inline bool serialize (hb_serialize_context_t *c, |
|
948 unsigned int items_len) |
|
949 { |
|
950 TRACE_SERIALIZE (this); |
|
951 if (unlikely (!c->extend_min (*this))) return_trace (false); |
|
952 len.set (items_len); /* TODO(serialize) Overflow? */ |
|
953 if (unlikely (!c->extend (*this))) return_trace (false); |
|
954 return_trace (true); |
|
955 } |
|
956 |
|
957 inline bool serialize (hb_serialize_context_t *c, |
|
958 Supplier<Type> &items, |
|
959 unsigned int items_len) |
|
960 { |
|
961 TRACE_SERIALIZE (this); |
|
962 if (unlikely (!serialize (c, items_len))) return_trace (false); |
|
963 for (unsigned int i = 0; i < items_len; i++) |
|
964 arrayZ[i] = items[i]; |
|
965 items += items_len; |
|
966 return_trace (true); |
|
967 } |
|
968 |
|
969 inline bool sanitize (hb_sanitize_context_t *c) const |
|
970 { |
|
971 TRACE_SANITIZE (this); |
|
972 if (unlikely (!sanitize_shallow (c))) return_trace (false); |
|
973 |
|
974 /* Note: for structs that do not reference other structs, |
|
975 * we do not need to call their sanitize() as we already did |
|
976 * a bound check on the aggregate array size. We just include |
|
977 * a small unreachable expression to make sure the structs |
|
978 * pointed to do have a simple sanitize(), ie. they do not |
|
979 * reference other structs via offsets. |
|
980 */ |
|
981 (void) (false && arrayZ[0].sanitize (c)); |
|
982 |
|
983 return_trace (true); |
|
984 } |
|
985 inline bool sanitize (hb_sanitize_context_t *c, const void *base) const |
|
986 { |
|
987 TRACE_SANITIZE (this); |
|
988 if (unlikely (!sanitize_shallow (c))) return_trace (false); |
|
989 unsigned int count = len; |
|
990 for (unsigned int i = 0; i < count; i++) |
|
991 if (unlikely (!arrayZ[i].sanitize (c, base))) |
|
992 return_trace (false); |
|
993 return_trace (true); |
|
994 } |
|
995 template <typename T> |
|
996 inline bool sanitize (hb_sanitize_context_t *c, const void *base, T user_data) const |
|
997 { |
|
998 TRACE_SANITIZE (this); |
|
999 if (unlikely (!sanitize_shallow (c))) return_trace (false); |
|
1000 unsigned int count = len; |
|
1001 for (unsigned int i = 0; i < count; i++) |
|
1002 if (unlikely (!arrayZ[i].sanitize (c, base, user_data))) |
|
1003 return_trace (false); |
|
1004 return_trace (true); |
|
1005 } |
|
1006 |
|
1007 template <typename SearchType> |
|
1008 inline int lsearch (const SearchType &x) const |
|
1009 { |
|
1010 unsigned int count = len; |
|
1011 for (unsigned int i = 0; i < count; i++) |
|
1012 if (!this->arrayZ[i].cmp (x)) |
|
1013 return i; |
|
1014 return -1; |
|
1015 } |
|
1016 |
|
1017 inline void qsort (void) |
|
1018 { |
|
1019 ::qsort (arrayZ, len, sizeof (Type), Type::cmp); |
|
1020 } |
|
1021 |
|
1022 private: |
|
1023 inline bool sanitize_shallow (hb_sanitize_context_t *c) const |
|
1024 { |
|
1025 TRACE_SANITIZE (this); |
|
1026 return_trace (len.sanitize (c) && c->check_array (arrayZ, Type::static_size, len)); |
|
1027 } |
|
1028 |
|
1029 public: |
|
1030 LenType len; |
|
1031 Type arrayZ[VAR]; |
|
1032 public: |
|
1033 DEFINE_SIZE_ARRAY (sizeof (LenType), arrayZ); |
|
1034 }; |
|
/* Array with a 32-bit length field. */
template <typename Type> struct LArrayOf : ArrayOf<Type, HBUINT32> {};
/* Pascal-style string: 8-bit length followed by that many bytes. */
typedef ArrayOf<HBUINT8, HBUINT8> PString;
|
1037 |
|
/* Array of offsets: a length-prefixed ArrayOf whose elements are
 * OffsetType offsets each pointing to a Type. */
template <typename Type, typename OffsetType=HBUINT16>
struct OffsetArrayOf : ArrayOf<OffsetTo<Type, OffsetType> > {};
|
1041 |
|
/* Array of offsets relative to the beginning of the array itself. */
template <typename Type>
struct OffsetListOf : OffsetArrayOf<Type>
{
  /* Bound-checked access: resolves the i'th offset against `this`
   * (the start of the array).  Out-of-range reads yield Null. */
  inline const Type& operator [] (unsigned int i) const
  {
    if (unlikely (i >= this->len)) return Null(Type);
    return this+this->arrayZ[i];
  }
  /* Non-const overload.  NOTE(review): still returns a const
   * reference — presumably because offset resolution via the
   * `this+offset` operator yields const; out-of-range indices yield
   * the scratch Crap object instead of Null. */
  inline const Type& operator [] (unsigned int i)
  {
    if (unlikely (i >= this->len)) return Crap(Type);
    return this+this->arrayZ[i];
  }

  /* Sanitize the offset array, resolving each target against `this`. */
  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (OffsetArrayOf<Type>::sanitize (c, this));
  }
  /* Same, forwarding `user_data` into each target's sanitize(). */
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, T user_data) const
  {
    TRACE_SANITIZE (this);
    return_trace (OffsetArrayOf<Type>::sanitize (c, this, user_data));
  }
};
|
1069 |
|
1070 |
|
1071 /* An array starting at second element. */ |
|
1072 template <typename Type, typename LenType=HBUINT16> |
|
1073 struct HeadlessArrayOf |
|
1074 { |
|
1075 inline const Type& operator [] (unsigned int i) const |
|
1076 { |
|
1077 if (unlikely (i >= len || !i)) return Null(Type); |
|
1078 return arrayZ[i-1]; |
|
1079 } |
|
1080 inline Type& operator [] (unsigned int i) |
|
1081 { |
|
1082 if (unlikely (i >= len || !i)) return Crap(Type); |
|
1083 return arrayZ[i-1]; |
|
1084 } |
|
1085 inline unsigned int get_size (void) const |
|
1086 { return len.static_size + (len ? len - 1 : 0) * Type::static_size; } |
|
1087 |
|
1088 inline bool serialize (hb_serialize_context_t *c, |
|
1089 Supplier<Type> &items, |
|
1090 unsigned int items_len) |
|
1091 { |
|
1092 TRACE_SERIALIZE (this); |
|
1093 if (unlikely (!c->extend_min (*this))) return_trace (false); |
|
1094 len.set (items_len); /* TODO(serialize) Overflow? */ |
|
1095 if (unlikely (!items_len)) return_trace (true); |
|
1096 if (unlikely (!c->extend (*this))) return_trace (false); |
|
1097 for (unsigned int i = 0; i < items_len - 1; i++) |
|
1098 arrayZ[i] = items[i]; |
|
1099 items += items_len - 1; |
|
1100 return_trace (true); |
|
1101 } |
|
1102 |
|
1103 inline bool sanitize (hb_sanitize_context_t *c) const |
|
1104 { |
|
1105 TRACE_SANITIZE (this); |
|
1106 if (unlikely (!sanitize_shallow (c))) return_trace (false); |
|
1107 |
|
1108 /* Note: for structs that do not reference other structs, |
|
1109 * we do not need to call their sanitize() as we already did |
|
1110 * a bound check on the aggregate array size. We just include |
|
1111 * a small unreachable expression to make sure the structs |
|
1112 * pointed to do have a simple sanitize(), ie. they do not |
|
1113 * reference other structs via offsets. |
|
1114 */ |
|
1115 (void) (false && arrayZ[0].sanitize (c)); |
|
1116 |
|
1117 return_trace (true); |
|
1118 } |
|
1119 |
|
1120 private: |
|
1121 inline bool sanitize_shallow (hb_sanitize_context_t *c) const |
|
1122 { |
|
1123 TRACE_SANITIZE (this); |
|
1124 return_trace (len.sanitize (c) && |
|
1125 (!len || c->check_array (arrayZ, Type::static_size, len - 1))); |
|
1126 } |
|
1127 |
|
1128 public: |
|
1129 LenType len; |
|
1130 Type arrayZ[VAR]; |
|
1131 public: |
|
1132 DEFINE_SIZE_ARRAY (sizeof (LenType), arrayZ); |
|
1133 }; |
|
1134 |
|
1135 |
|
1136 /* |
|
1137 * An array with sorted elements. Supports binary searching. |
|
1138 */ |
|
1139 template <typename Type, typename LenType=HBUINT16> |
|
1140 struct SortedArrayOf : ArrayOf<Type, LenType> |
|
1141 { |
|
1142 template <typename SearchType> |
|
1143 inline int bsearch (const SearchType &x) const |
|
1144 { |
|
1145 /* Hand-coded bsearch here since this is in the hot inner loop. */ |
|
1146 const Type *arr = this->arrayZ; |
|
1147 int min = 0, max = (int) this->len - 1; |
|
1148 while (min <= max) |
|
1149 { |
|
1150 int mid = (min + max) / 2; |
|
1151 int c = arr[mid].cmp (x); |
|
1152 if (c < 0) |
|
1153 max = mid - 1; |
|
1154 else if (c > 0) |
|
1155 min = mid + 1; |
|
1156 else |
|
1157 return mid; |
|
1158 } |
|
1159 return -1; |
|
1160 } |
|
1161 }; |
|
1162 |
|
1163 /* |
|
1164 * Binary-search arrays |
|
1165 */ |
|
1166 |
|
struct BinSearchHeader
{
  /* The header doubles as a length field (see BinSearchArrayOf):
   * converting to uint32_t yields the element count. */
  inline operator uint32_t (void) const { return len; }

  inline bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this));
  }

  /* Fill in the four OpenType binary-search header fields from the
   * element count v, with the unit size hard-coded as 16 bytes:
   *   entrySelector = floor(log2(v)) for v >= 1, else 0 (assuming
   *                   _hb_bit_storage(v) is the bit width of v —
   *                   confirm against its definition),
   *   searchRange   = 16 * 2^entrySelector,
   *   rangeShift    = 16 * v - searchRange, clamped to 0.
   * NOTE(review): the 16-byte unit size must match the entry size of
   * every table using this header — verify at call sites. */
  inline void set (unsigned int v)
  {
    len.set (v);
    assert (len == v); /* v must fit in 16 bits. */
    entrySelector.set (MAX (1u, _hb_bit_storage (v)) - 1);
    searchRange.set (16 * (1u << entrySelector));
    rangeShift.set (v * 16 > searchRange
		    ? 16 * v - searchRange
		    : 0);
  }

  protected:
  HBUINT16 len;
  HBUINT16 searchRange;
  HBUINT16 entrySelector;
  HBUINT16 rangeShift;

  public:
  DEFINE_SIZE_STATIC (8);
};
|
1197 |
|
/* Sorted array whose "length" field is a full BinSearchHeader; the
 * header's uint32_t conversion lets it serve as the LenType. */
template <typename Type>
struct BinSearchArrayOf : SortedArrayOf<Type, BinSearchHeader> {};
|
1200 |
|
1201 |
|
1202 /* Lazy struct and blob loaders. */ |
|
1203 |
|
1204 /* Logic is shared between hb_lazy_loader_t and hb_table_lazy_loader_t */ |
|
1205 template <typename T> |
|
1206 struct hb_lazy_loader_t |
|
1207 { |
|
1208 inline void init (hb_face_t *face_) |
|
1209 { |
|
1210 face = face_; |
|
1211 instance = nullptr; |
|
1212 } |
|
1213 |
|
1214 inline void fini (void) |
|
1215 { |
|
1216 if (instance && instance != &Null(T)) |
|
1217 { |
|
1218 instance->fini(); |
|
1219 free (instance); |
|
1220 } |
|
1221 } |
|
1222 |
|
1223 inline const T* get (void) const |
|
1224 { |
|
1225 retry: |
|
1226 T *p = (T *) hb_atomic_ptr_get (&instance); |
|
1227 if (unlikely (!p)) |
|
1228 { |
|
1229 p = (T *) calloc (1, sizeof (T)); |
|
1230 if (unlikely (!p)) |
|
1231 p = const_cast<T *> (&Null(T)); |
|
1232 else |
|
1233 p->init (face); |
|
1234 if (unlikely (!hb_atomic_ptr_cmpexch (const_cast<T **>(&instance), nullptr, p))) |
|
1235 { |
|
1236 if (p != &Null(T)) |
|
1237 p->fini (); |
|
1238 goto retry; |
|
1239 } |
|
1240 } |
|
1241 return p; |
|
1242 } |
|
1243 |
|
1244 inline const T* operator-> (void) const |
|
1245 { |
|
1246 return get (); |
|
1247 } |
|
1248 |
|
1249 private: |
|
1250 hb_face_t *face; |
|
1251 T *instance; |
|
1252 }; |
|
1253 |
|
1254 /* Logic is shared between hb_lazy_loader_t and hb_table_lazy_loader_t */ |
|
1255 template <typename T> |
|
1256 struct hb_table_lazy_loader_t |
|
1257 { |
|
1258 inline void init (hb_face_t *face_) |
|
1259 { |
|
1260 face = face_; |
|
1261 blob = nullptr; |
|
1262 } |
|
1263 |
|
1264 inline void fini (void) |
|
1265 { |
|
1266 hb_blob_destroy (blob); |
|
1267 } |
|
1268 |
|
1269 inline const T* get (void) const |
|
1270 { |
|
1271 retry: |
|
1272 hb_blob_t *blob_ = (hb_blob_t *) hb_atomic_ptr_get (&blob); |
|
1273 if (unlikely (!blob_)) |
|
1274 { |
|
1275 blob_ = OT::Sanitizer<T>().sanitize (face->reference_table (T::tableTag)); |
|
1276 if (!hb_atomic_ptr_cmpexch (&blob, nullptr, blob_)) |
|
1277 { |
|
1278 hb_blob_destroy (blob_); |
|
1279 goto retry; |
|
1280 } |
|
1281 blob = blob_; |
|
1282 } |
|
1283 return blob_->as<T> (); |
|
1284 } |
|
1285 |
|
1286 inline const T* operator-> (void) const |
|
1287 { |
|
1288 return get(); |
|
1289 } |
|
1290 |
|
1291 private: |
|
1292 hb_face_t *face; |
|
1293 mutable hb_blob_t *blob; |
|
1294 }; |
|
1295 |
|
1296 |
|
1297 } /* namespace OT */ |
|
1298 |
|
1299 |
|
1300 #endif /* HB_OPEN_TYPE_PRIVATE_HH */ |
|