|
1 /* |
|
2 * Copyright © 2011,2012 Google, Inc. |
|
3 * |
|
4 * This is part of HarfBuzz, a text shaping library. |
|
5 * |
|
6 * Permission is hereby granted, without written agreement and without |
|
7 * license or royalty fees, to use, copy, modify, and distribute this |
|
8 * software and its documentation for any purpose, provided that the |
|
9 * above copyright notice and the following two paragraphs appear in |
|
10 * all copies of this software. |
|
11 * |
|
12 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR |
|
13 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES |
|
14 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN |
|
15 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH |
|
16 * DAMAGE. |
|
17 * |
|
18 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, |
|
19 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND |
|
20 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS |
|
21 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO |
|
22 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. |
|
23 * |
|
24 * Google Author(s): Behdad Esfahbod |
|
25 */ |
|
26 |
|
27 #include "hb-ot-shape-normalize-private.hh" |
|
28 #include "hb-ot-shape-complex-private.hh" |
|
29 #include "hb-ot-shape-private.hh" |
|
30 |
|
31 |
|
32 /* |
|
 * HIGH-LEVEL DESIGN:
|
34 * |
|
35 * This file exports one main function: _hb_ot_shape_normalize(). |
|
36 * |
|
37 * This function closely reflects the Unicode Normalization Algorithm, |
|
38 * yet it's different. |
|
39 * |
|
40 * Each shaper specifies whether it prefers decomposed (NFD) or composed (NFC). |
|
41 * The logic however tries to use whatever the font can support. |
|
42 * |
|
43 * In general what happens is that: each grapheme is decomposed in a chain |
|
44 * of 1:2 decompositions, marks reordered, and then recomposed if desired, |
|
45 * so far it's like Unicode Normalization. However, the decomposition and |
|
46 * recomposition only happens if the font supports the resulting characters. |
|
47 * |
|
48 * The goals are: |
|
49 * |
|
50 * - Try to render all canonically equivalent strings similarly. To really |
|
51 * achieve this we have to always do the full decomposition and then |
|
52 * selectively recompose from there. It's kinda too expensive though, so |
|
53 * we skip some cases. For example, if composed is desired, we simply |
|
54 * don't touch 1-character clusters that are supported by the font, even |
|
55 * though their NFC may be different. |
|
56 * |
|
57 * - When a font has a precomposed character for a sequence but the 'ccmp' |
|
58 * feature in the font is not adequate, use the precomposed character |
|
59 * which typically has better mark positioning. |
|
60 * |
|
61 * - When a font does not support a combining mark, but supports it precomposed |
|
62 * with previous base, use that. This needs the itemizer to have this |
|
63 * knowledge too. We need to provide assistance to the itemizer. |
|
64 * |
|
65 * - When a font does not support a character but supports its decomposition, |
|
66 * well, use the decomposition (preferring the canonical decomposition, but |
|
67 * falling back to the compatibility decomposition if necessary). The |
|
68 * compatibility decomposition is really nice to have, for characters like |
|
69 * ellipsis, or various-sized space characters. |
|
70 * |
|
71 * - The complex shapers can customize the compose and decompose functions to |
|
72 * offload some of their requirements to the normalizer. For example, the |
|
73 * Indic shaper may want to disallow recomposing of two matras. |
|
74 * |
|
75 * - We try compatibility decomposition if decomposing through canonical |
|
76 * decomposition alone failed to find a sequence that the font supports. |
|
77 * We don't try compatibility decomposition recursively during the canonical |
|
78 * decomposition phase. This has minimal impact. There are only a handful |
|
 * of Greek letters that have canonical decompositions that include characters
|
80 * with compatibility decomposition. Those can be found using this command: |
|
81 * |
|
82 * egrep "`echo -n ';('; grep ';<' UnicodeData.txt | cut -d';' -f1 | tr '\n' '|'; echo ') '`" UnicodeData.txt |
|
83 */ |
|
84 |
|
85 static bool |
|
86 decompose_unicode (const hb_ot_shape_normalize_context_t *c, |
|
87 hb_codepoint_t ab, |
|
88 hb_codepoint_t *a, |
|
89 hb_codepoint_t *b) |
|
90 { |
|
91 return c->unicode->decompose (ab, a, b); |
|
92 } |
|
93 |
|
94 static bool |
|
95 compose_unicode (const hb_ot_shape_normalize_context_t *c, |
|
96 hb_codepoint_t a, |
|
97 hb_codepoint_t b, |
|
98 hb_codepoint_t *ab) |
|
99 { |
|
100 return c->unicode->compose (a, b, ab); |
|
101 } |
|
102 |
|
103 static inline void |
|
104 set_glyph (hb_glyph_info_t &info, hb_font_t *font) |
|
105 { |
|
106 font->get_glyph (info.codepoint, 0, &info.glyph_index()); |
|
107 } |
|
108 |
|
/* Emit `unichar` (with glyph id `glyph`) to the out-buffer in place of the
 * current character, then recompute the Unicode properties of the character
 * just written (now at prev()), since the emitted codepoint may differ from
 * the one whose properties were there before. */
static inline void
output_char (hb_buffer_t *buffer, hb_codepoint_t unichar, hb_codepoint_t glyph)
{
  /* Order matters: the glyph id is staged on cur() first, so that
   * output_glyph() carries it into the out-buffer along with `unichar`. */
  buffer->cur().glyph_index() = glyph;
  buffer->output_glyph (unichar);
  _hb_glyph_info_set_unicode_props (&buffer->prev(), buffer->unicode);
}
|
116 |
|
/* Record `glyph` for the current character and copy it to the out-buffer
 * unchanged, advancing the cursor. */
static inline void
next_char (hb_buffer_t *buffer, hb_codepoint_t glyph)
{
  buffer->cur().glyph_index() = glyph;
  buffer->next_glyph ();
}
|
123 |
|
/* Advance past the current character without emitting it — used after its
 * decomposition has already been written to the out-buffer. */
static inline void
skip_char (hb_buffer_t *buffer)
{
  buffer->skip_glyph ();
}
|
129 |
|
/* Returns 0 if didn't decompose, number of resulting characters otherwise.
 *
 * Recursively decomposes `ab` through chains of 1:2 canonical decompositions,
 * emitting characters to the out-buffer only when the font can render every
 * character of the resulting sequence.  With `shortest`, stop at the first
 * level whose first character the font supports; otherwise prefer the fully
 * decomposed form. */
static inline unsigned int
decompose (const hb_ot_shape_normalize_context_t *c, bool shortest, hb_codepoint_t ab)
{
  hb_codepoint_t a, b, a_glyph, b_glyph;
  hb_buffer_t * const buffer = c->buffer;
  hb_font_t * const font = c->font;

  /* Bail if ab doesn't decompose, or if it decomposes to a pair whose second
   * character the font cannot render — emitting would be pointless then.
   * (b may be 0 for singleton decompositions; b_glyph stays unset in that
   * case and every later use of it is guarded by `b`.) */
  if (!c->decompose (c, ab, &a, &b) ||
      (b && !font->get_glyph (b, 0, &b_glyph)))
    return 0;

  bool has_a = font->get_glyph (a, 0, &a_glyph);
  if (shortest && has_a) {
    /* Output a and b */
    output_char (buffer, a, a_glyph);
    if (likely (b)) {
      output_char (buffer, b, b_glyph);
      return 2;
    }
    return 1;
  }

  /* Recurse on the first character: when not taking the shortest form (or
   * when the font lacks a itself), prefer a's further decomposition. */
  unsigned int ret;
  if ((ret = decompose (c, shortest, a))) {
    if (b) {
      output_char (buffer, b, b_glyph);
      return ret + 1;
    }
    return ret;
  }

  /* a did not decompose further; fall back to emitting a itself, if the
   * font covers it. */
  if (has_a) {
    output_char (buffer, a, a_glyph);
    if (likely (b)) {
      output_char (buffer, b, b_glyph);
      return 2;
    }
    return 1;
  }

  /* Neither a (nor its decomposition) is renderable: give up. */
  return 0;
}
|
173 |
|
174 /* Returns 0 if didn't decompose, number of resulting characters otherwise. */ |
|
175 static inline unsigned int |
|
176 decompose_compatibility (const hb_ot_shape_normalize_context_t *c, hb_codepoint_t u) |
|
177 { |
|
178 unsigned int len, i; |
|
179 hb_codepoint_t decomposed[HB_UNICODE_MAX_DECOMPOSITION_LEN]; |
|
180 hb_codepoint_t glyphs[HB_UNICODE_MAX_DECOMPOSITION_LEN]; |
|
181 |
|
182 len = c->buffer->unicode->decompose_compatibility (u, decomposed); |
|
183 if (!len) |
|
184 return 0; |
|
185 |
|
186 for (i = 0; i < len; i++) |
|
187 if (!c->font->get_glyph (decomposed[i], 0, &glyphs[i])) |
|
188 return 0; |
|
189 |
|
190 for (i = 0; i < len; i++) |
|
191 output_char (c->buffer, decomposed[i], glyphs[i]); |
|
192 |
|
193 return len; |
|
194 } |
|
195 |
|
/* Process the single character under the cursor: pass it through, decompose
 * it, or fall back to its compatibility decomposition.  The cursor is
 * advanced past the character in every branch. */
static inline void
decompose_current_character (const hb_ot_shape_normalize_context_t *c, bool shortest)
{
  hb_buffer_t * const buffer = c->buffer;
  hb_codepoint_t u = buffer->cur().codepoint;
  hb_codepoint_t glyph;

  /* Kind of a cute waterfall here... */
  if (shortest && c->font->get_glyph (u, 0, &glyph))
    /* Shortest-form mode and the font has u directly: keep it as is. */
    next_char (buffer, glyph);
  else if (decompose (c, shortest, u))
    /* Decomposition was emitted by decompose(); drop the original. */
    skip_char (buffer);
  else if (!shortest && c->font->get_glyph (u, 0, &glyph))
    /* Full-decomposition mode: only now fall back to u itself. */
    next_char (buffer, glyph);
  else if (decompose_compatibility (c, u))
    skip_char (buffer);
  else
    next_char (buffer, glyph); /* glyph is initialized in earlier branches.
				* NOTE(review): this relies on get_glyph
				* writing *glyph even on failure; if the font
				* funcs don't guarantee that, glyph may be
				* read uninitialized here when shortest is
				* true — verify. */
}
|
215 |
|
/* Process a cluster known to contain at least one variation selector:
 * map base+VS pairs through the font's variation-glyph lookup where
 * possible, otherwise pass characters through untouched for GSUB.
 * No decomposition is attempted in this path.
 * NOTE(review): the `short_circuit` parameter is currently unused here. */
static inline void
handle_variation_selector_cluster (const hb_ot_shape_normalize_context_t *c, unsigned int end, bool short_circuit)
{
  /* TODO Currently if there's a variation-selector we give-up, it's just too hard. */
  hb_buffer_t * const buffer = c->buffer;
  hb_font_t * const font = c->font;
  /* Walk pairwise up to the second-to-last character so cur(+1) stays valid. */
  for (; buffer->idx < end - 1;) {
    if (unlikely (buffer->unicode->is_variation_selector (buffer->cur(+1).codepoint))) {
      /* The next two lines are some ugly lines... But work. */
      if (font->get_glyph (buffer->cur().codepoint, buffer->cur(+1).codepoint, &buffer->cur().glyph_index()))
      {
	/* Font supports the base+VS pair: collapse the two characters into
	 * one, keeping the base codepoint.
	 * NOTE(review): the third argument points into the buffer being
	 * modified; presumably replace_glyphs() copies it before shifting —
	 * verify. */
	buffer->replace_glyphs (2, 1, &buffer->cur().codepoint);
      }
      else
      {
	/* Just pass on the two characters separately, let GSUB do its magic. */
	set_glyph (buffer->cur(), font);
	buffer->next_glyph ();
	set_glyph (buffer->cur(), font);
	buffer->next_glyph ();
      }
      /* Skip any further variation selectors. */
      while (buffer->idx < end && unlikely (buffer->unicode->is_variation_selector (buffer->cur().codepoint)))
      {
	set_glyph (buffer->cur(), font);
	buffer->next_glyph ();
      }
    } else {
      set_glyph (buffer->cur(), font);
      buffer->next_glyph ();
    }
  }
  /* Flush the final character of the cluster, if not already consumed. */
  if (likely (buffer->idx < end)) {
    set_glyph (buffer->cur(), font);
    buffer->next_glyph ();
  }
}
|
253 |
|
254 static inline void |
|
255 decompose_multi_char_cluster (const hb_ot_shape_normalize_context_t *c, unsigned int end, bool short_circuit) |
|
256 { |
|
257 hb_buffer_t * const buffer = c->buffer; |
|
258 for (unsigned int i = buffer->idx; i < end; i++) |
|
259 if (unlikely (buffer->unicode->is_variation_selector (buffer->info[i].codepoint))) { |
|
260 handle_variation_selector_cluster (c, end, short_circuit); |
|
261 return; |
|
262 } |
|
263 |
|
264 while (buffer->idx < end) |
|
265 decompose_current_character (c, short_circuit); |
|
266 } |
|
267 |
|
268 static inline void |
|
269 decompose_cluster (const hb_ot_shape_normalize_context_t *c, unsigned int end, bool might_short_circuit, bool always_short_circuit) |
|
270 { |
|
271 if (likely (c->buffer->idx + 1 == end)) |
|
272 decompose_current_character (c, might_short_circuit); |
|
273 else |
|
274 decompose_multi_char_cluster (c, end, always_short_circuit); |
|
275 } |
|
276 |
|
277 |
|
278 static int |
|
279 compare_combining_class (const hb_glyph_info_t *pa, const hb_glyph_info_t *pb) |
|
280 { |
|
281 unsigned int a = _hb_glyph_info_get_modified_combining_class (pa); |
|
282 unsigned int b = _hb_glyph_info_get_modified_combining_class (pb); |
|
283 |
|
284 return a < b ? -1 : a == b ? 0 : +1; |
|
285 } |
|
286 |
|
287 |
|
/* Main entry point: normalize `buffer` per the shaper's preference
 * (decomposed/composed/none), constrained by what `font` can render.
 * See the HIGH-LEVEL DESIGN comment at the top of this file. */
void
_hb_ot_shape_normalize (const hb_ot_shape_plan_t *plan,
			hb_buffer_t *buffer,
			hb_font_t *font)
{
  if (unlikely (!buffer->len)) return;

  _hb_buffer_assert_unicode_vars (buffer);

  hb_ot_shape_normalization_mode_t mode = plan->shaper->normalization_preference;
  /* Build the normalize context; the shaper may override the default
   * Unicode compose/decompose callbacks (e.g. Indic disallows recomposing
   * certain pairs). */
  const hb_ot_shape_normalize_context_t c = {
    plan,
    buffer,
    font,
    buffer->unicode,
    plan->shaper->decompose ? plan->shaper->decompose : decompose_unicode,
    plan->shaper->compose ? plan->shaper->compose : compose_unicode
  };

  bool always_short_circuit = mode == HB_OT_SHAPE_NORMALIZATION_MODE_NONE;
  bool might_short_circuit = always_short_circuit ||
			     (mode != HB_OT_SHAPE_NORMALIZATION_MODE_DECOMPOSED &&
			      mode != HB_OT_SHAPE_NORMALIZATION_MODE_COMPOSED_DIACRITICS_NO_SHORT_CIRCUIT);
  unsigned int count;

  /* We do a fairly straightforward yet custom normalization process in three
   * separate rounds: decompose, reorder, recompose (if desired).  Currently
   * this makes two buffer swaps.  We can make it faster by moving the last
   * two rounds into the inner loop for the first round, but it's more readable
   * this way. */


  /* First round, decompose */

  buffer->clear_output ();
  count = buffer->len;
  for (buffer->idx = 0; buffer->idx < count;)
  {
    /* A cluster here is a base character plus its trailing marks. */
    unsigned int end;
    for (end = buffer->idx + 1; end < count; end++)
      if (likely (!HB_UNICODE_GENERAL_CATEGORY_IS_MARK (_hb_glyph_info_get_general_category (&buffer->info[end]))))
	break;

    decompose_cluster (&c, end, might_short_circuit, always_short_circuit);
  }
  buffer->swap_buffers ();


  /* Second round, reorder (inplace) */

  count = buffer->len;
  for (unsigned int i = 0; i < count; i++)
  {
    if (_hb_glyph_info_get_modified_combining_class (&buffer->info[i]) == 0)
      continue;

    /* Find the extent of the run of non-zero combining classes. */
    unsigned int end;
    for (end = i + 1; end < count; end++)
      if (_hb_glyph_info_get_modified_combining_class (&buffer->info[end]) == 0)
	break;

    /* We are going to do a O(n^2).  Only do this if the sequence is short. */
    if (end - i > 10) {
      i = end;
      continue;
    }

    buffer->sort (i, end, compare_combining_class);

    i = end;
  }


  if (mode == HB_OT_SHAPE_NORMALIZATION_MODE_NONE ||
      mode == HB_OT_SHAPE_NORMALIZATION_MODE_DECOMPOSED)
    return;

  /* Third round, recompose */

  /* As noted in the comment earlier, we don't try to combine
   * ccc=0 chars with their previous Starter. */

  buffer->clear_output ();
  count = buffer->len;
  /* `starter` indexes the most recent ccc=0 character in the OUT buffer. */
  unsigned int starter = 0;
  buffer->next_glyph ();
  while (buffer->idx < count)
  {
    hb_codepoint_t composed, glyph;
    if (/* We don't try to compose a non-mark character with its preceding starter.
	 * This is both an optimization to avoid trying to compose every two neighboring
	 * glyphs in most scripts AND a desired feature for Hangul.  Apparently Hangul
	 * fonts are not designed to mix-and-match pre-composed syllables and Jamo. */
	HB_UNICODE_GENERAL_CATEGORY_IS_MARK (_hb_glyph_info_get_general_category (&buffer->cur())) &&
	/* If there's anything between the starter and this char, they should have CCC
	 * smaller than this character's. */
	(starter == buffer->out_len - 1 ||
	 _hb_glyph_info_get_modified_combining_class (&buffer->prev()) < _hb_glyph_info_get_modified_combining_class (&buffer->cur())) &&
	/* And compose. */
	c.compose (&c,
		   buffer->out_info[starter].codepoint,
		   buffer->cur().codepoint,
		   &composed) &&
	/* And the font has glyph for the composite. */
	font->get_glyph (composed, 0, &glyph))
    {
      /* Composes. */
      buffer->next_glyph (); /* Copy to out-buffer. */
      if (unlikely (buffer->in_error))
	return;
      buffer->merge_out_clusters (starter, buffer->out_len);
      buffer->out_len--; /* Remove the second composable. */
      /* Modify starter and carry on. */
      buffer->out_info[starter].codepoint = composed;
      buffer->out_info[starter].glyph_index() = glyph;
      _hb_glyph_info_set_unicode_props (&buffer->out_info[starter], buffer->unicode);

      continue;
    }

    /* Blocked, or doesn't compose. */
    buffer->next_glyph ();

    /* A ccc=0 character becomes the new starter for subsequent marks. */
    if (_hb_glyph_info_get_modified_combining_class (&buffer->prev()) == 0)
      starter = buffer->out_len - 1;
  }
  buffer->swap_buffers ();

}