3 # This file is part of DisOrder.
4 # Copyright (C) 2007 Richard Kettlewell
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 # Generate Unicode support tables
24 # This script will download data from unicode.org if the required files
25 # aren't in the current directory.
27 # After modifying this script you should run:
28 # make -C lib rebuild-unicode check
30 # Things not supported yet:
31 # - SpecialCasing.txt data for case mapping
32 # - Title case offsets
33 # - Some kind of hinting for composition
36 # NB the generated files DO NOT offer a stable ABI and so are not immediately
37 # suitable for use in a general-purpose library. Things that would need to
39 # - Hide unidata.h from applications; it will never be ABI- or even API-stable.
# - Stabilized General_Category values
41 # - Extend the unicode.h API to general utility rather than just what
# Fragment of the output helper (its "sub out {" opener is not visible
# in this chunk): write all arguments to the currently selected STDOUT,
# which is re-opened onto each generated file, and die on write error.
print @_ or die "$!\n";

# Fragment of a key-building helper: flatten the per-codepoint hash
# %$d into a single string by joining its values in sorted-key order,
# so identical entries produce identical keys for de-duplication.
# NOTE(review): $d is presumably a hashref parameter bound on a line
# not visible here.
return join("-", map($d->{$_}, sort keys %$d));
# This can be varied to trade off the number of subtables against their size.
# 16 gave the smallest results last time I checked (on a Mac with a 32-bit
# NOTE(review): the assignment of $modulus itself is not visible in
# this chunk of the file.

# Where to break the table.  There is a huge empty section of the Unicode
# code space and we deal with this by simply leaving it out of the table.
# This complicates the lookup function a little but should not affect
# performance in the cases we care about.
our $break_start = 0x30000;
our $break_end = 0xE0000;

# Similarly we simply omit the very top of the table and sort it out in the
our $break_top = 0xE0200;

# Accumulators filled in while reading the UCD files.
my %cats = ();			# known general categories
my %data = ();			# mapping of codepoints to information
my $max = 0;			# maximum codepoint
my $maxccc = 0;			# maximum combining class
# NOTE(review): the matching $maxud/$maxld declarations are on lines
# not visible in this chunk.
my $minud = 0;			# max/min upper case offset
my $minld = 0;			# max/min lower case offset
# Make sure we have our desired input files.  We explicitly specify a
# Unicode standard version to make sure that a given version of DisOrder
# supports a given version of Unicode.
# NOTE(review): the "sub input {" opener and the check for an existing
# local copy are not visible in this chunk.
my $lpath = basename($path);
# Fetch the 5.0.0 UCD file from unicode.org.
# NOTE(review): the return status of system() is not checked here; a
# failed download would only be caught by the open() below (or by
# chmod() failing on a missing file).
system("wget http://www.unicode.org/Public/5.0.0/ucd/$path");
# Make the cached copy read-only so it isn't accidentally edited.
chmod(0444, $lpath) or die "$lpath: $!\n";
# Re-open STDIN onto the local file so callers can just read <STDIN>.
open(STDIN, "<$lpath") or die "$lpath: $!\n";
print STDERR "Reading $lpath...\n";
# Read the main data file
input("UnicodeData.txt");
# NOTE(review): the enclosing while(<STDIN>) read loop and the
# assignment of $name (from $f[1]) are not visible in this chunk.
my @f = split(/;/, $_);		# semicolon-separated UnicodeData fields
my $c = hex($f[0]);		# codepoint
# Assigned characters must not fall inside the region we omit from
# the generated tables.
die "$f[0] $name is in the break\n"
    if $c >= $break_start && $c < $break_end;
my $gc = $f[2];			# General_Category
# Various GCs we don't expect to see in UnicodeData.txt
$cats{$gc} = 1;			# always record all GCs
# <..., First> / <..., Last> name pairs define whole code point
# ranges rather than single characters.
# NOTE(review): the bodies of these two branches are on lines not
# visible in this chunk.
if($name =~ /first>/i) {
} elsif($name =~ /last>/i) {
# Cn (unassigned) never appears explicitly in UnicodeData.txt.
die "unexpected Cn" if $gc eq 'Cn';
my $ccc = $f[3];		# Canonical_Combining_Class
my $dm = $f[5];			# Decomposition_Type + Decomposition_Mapping
my $sum = hex($f[12]) || $c;	# Simple_Uppercase_Mapping
my $slm = hex($f[13]) || $c;	# Simple_Lowercase_Mapping
# recalculate the upper/lower case mappings as offsets
# NOTE(review): the lines computing $ud and $ld from $sum/$slm are
# not visible in this chunk.
# update bounds on various values
$maxccc = $ccc if $ccc > $maxccc;    # assumed never to be -ve
$minud = $ud if $ud < $minud;
$maxud = $ud if $ud > $maxud;
$minld = $ld if $ld < $minld;
$maxld = $ld if $ld > $maxld;
# Report each <First>/<Last> range as it is expanded.
printf STDERR "> range %04X-%04X is %s\n", $start, $end, $gc;
for($c = $start; $c <= $end; ++$c) {
# This is a compatibility decomposition
# Decomposition_Mapping is a space-separated list of hex code points.
$d->{decomp} = [map(hex($_), split(/\s+/, $dm))];
# Track the highest code point seen.
$max = $end if $end > $max;
# Read a two-column property file ("range ; value") and record each
# listed code point's value under $propkey in %data.
# NOTE(review): the argument unpacking, the input() call and the
# read-loop opener are on lines not visible in this chunk.
sub read_prop_with_ranges {
    my ($range, $propval) = split(/\s*;\s*/, $_);
    # Ranges are written "XXXX..YYYY"; expand them code point by
    # code point.
    if($range =~ /(.*)\.\.(.*)/) {
	for my $c (hex($1) .. hex($2)) {
	    $data{$c}->{$propkey} = $propval;
	# Single code point entry (else branch; its opener is not
	# visible here).
	$data{$c}->{$propkey} = $propval;
# Load the grapheme, word and sentence break property tables from the
# auxiliary UCD files, recording each under its own key in %data.
for my $spec ([ "auxiliary/GraphemeBreakProperty.txt", "gbreak" ],
              [ "auxiliary/WordBreakProperty.txt",     "wbreak" ],
              [ "auxiliary/SentenceBreakProperty.txt", "sbreak" ]) {
    read_prop_with_ranges(@$spec);
}
# Compute the full list and fill in the Extend category properly
# NOTE(review): the declarations of %gbreak/%wbreak/%sbreak and the
# else/closing-brace lines are not visible in this chunk.
for my $c (keys %data) {
    # Grapheme_Break defaults to Other for unlisted code points.
    if(!exists $data{$c}->{gbreak}) {
	$data{$c}->{gbreak} = 'Other';
    # Record every Grapheme_Break value seen, for the enum later.
    $gbreak{$data{$c}->{gbreak}} = 1;
    # Word_Break: inherit Extend from Grapheme_Break, else Other.
    if(!exists $data{$c}->{wbreak}) {
	if($data{$c}->{gbreak} eq 'Extend') {
	    $data{$c}->{wbreak} = 'Extend';
	    $data{$c}->{wbreak} = 'Other';
    $wbreak{$data{$c}->{wbreak}} = 1;
    # Sentence_Break: same treatment as Word_Break.
    if(!exists $data{$c}->{sbreak}) {
	if($data{$c}->{gbreak} eq 'Extend') {
	    $data{$c}->{sbreak} = 'Extend';
	    $data{$c}->{sbreak} = 'Other';
    $sbreak{$data{$c}->{sbreak}} = 1;
# Various derived properties
input("DerivedNormalizationProps.txt");
# NOTE(review): the read-loop opener and the comment/blank-line
# filtering are on lines not visible in this chunk.
my @f = split(/\s*;\s*/, $_);
my ($range, $propkey, $propval) = @f;
# As in read_prop_with_ranges(): expand "XXXX..YYYY" ranges.
if($range =~ /(.*)\.\.(.*)/) {
    for my $c (hex($1) .. hex($2)) {
	# NOTE(review): these assignments have no terminating ";";
	# a trailing "if" condition (presumably bounding $c) follows
	# on lines not visible here.
	$data{$c}->{$propkey} = $propval
    # Single code point entry.
    $data{$c}->{$propkey} = $propval
# Pad the maximum code point so that the character count ($max + 1)
# becomes an exact multiple of the subtable size $modulus.
$max = $max - ($max % $modulus) + ($modulus - 1);
# Private use characters
# We only fill in values below $max, utf32__unidata()
# NOTE(review): the loop bodies assigning the private-use entries and
# the template hash opener for unassigned characters are not visible
# in this chunk.
for(my $c = 0xE000; $c <= 0xF8FF && $c <= $max; ++$c) {
# Plane 15 private-use range.
for(my $c = 0xF0000; $c <= 0xFFFFD && $c <= $max; ++$c) {
# Plane 16 private-use range.
for(my $c = 0x100000; $c <= 0x10FFFD && $c <= $max; ++$c) {
# Anything left is not assigned
    "gc" => "Cn",		# not assigned
# Ensure every code point up to $max has an entry with all three
# break properties present, defaulting to Other.
for(my $c = 0; $c <= $max; ++$c) {
    if(!exists $data{$c}) {
    if(!exists $data{$c}->{wbreak}) {
	$data{$c}->{wbreak} = 'Other';
    if(!exists $data{$c}->{gbreak}) {
	$data{$c}->{gbreak} = 'Other';
    if(!exists $data{$c}->{sbreak}) {
	$data{$c}->{sbreak} = 'Other';
# Read the casefolding data too
input("CaseFolding.txt");
# NOTE(review): the read-loop opener and the assignment of $c from
# the first field are not visible in this chunk.
next if /^\#/ or $_ eq '';	# skip comments and blank lines
my @f = split(/\s*;\s*/, $_);
# Full case folding means use status C and F.
# We discard status T, Turkish users may wish to change this.
if($f[1] eq 'C' or $f[1] eq 'F') {
    $data{$c}->{casefold} = $f[2];
    # We are particularly interested in combining characters that
    # case-fold to non-combining characters, or characters that
    # case-fold to sequences with combining characters in non-initial
    # positions, as these require decomposition before case-folding
    my @d = map(hex($_), split(/\s+/, $data{$c}->{casefold}));
    if($data{$c}->{ccc} != 0) {
	# This is a combining character
	if($data{$d[0]}->{ccc} == 0) {
	    # The first character of its case-folded form is NOT
	    # a combining character.  The field name is the example
	    # explicitly mentioned in the spec.
	    $data{$c}->{ypogegrammeni} = 1;
	# This is a non-combining character; inspect the non-initial
	# code points of the case-folded sequence
	# NOTE(review): the statement dropping the initial element of
	# @d is presumably on a line not visible in this chunk.
	if(grep($data{$_}->{ccc} != 0, @d)) {
	    # Some code point in the case-folded sequence IS a
	    # combining character.
	    $data{$c}->{ypogegrammeni} = 1;
# Generate the header file
print STDERR "Generating unidata.h...\n";
# out() writes via STDOUT, so re-open it onto the target file.
open(STDOUT, ">unidata.h") or die "unidata.h: $!\n";

out("/* Automatically generated file, see scripts/make-unidata */\n",
    "#ifndef UNIDATA_H\n",
    "#define UNIDATA_H\n");

# TODO choose stable values for General_Category
# One enum per property; members are emitted in sorted-key order so
# the generated numbering is deterministic across runs.
# NOTE(review): the join() openers pairing with some of the stray
# close-parens below are on lines not visible in this chunk.
out("enum unicode_General_Category {\n",
    map(" unicode_General_Category_$_", sort keys %cats)), "\n};\n");

out("enum unicode_Grapheme_Break {\n",
    map(" unicode_Grapheme_Break_$_", sort keys %gbreak)),
out("extern const char *const unicode_Grapheme_Break_names[];\n");

out("enum unicode_Word_Break {\n",
    map(" unicode_Word_Break_$_", sort keys %wbreak)),
out("extern const char *const unicode_Word_Break_names[];\n");

out("enum unicode_Sentence_Break {\n",
    map(" unicode_Sentence_Break_$_", sort keys %sbreak)),
out("extern const char *const unicode_Sentence_Break_names[];\n");

# Flag bits for the per-code-point flags field (see struct below).
out("enum unicode_flags {\n",
    " unicode_normalize_before_casefold = 1,\n",
    " unicode_compatibility_decomposition = 2\n",
# Choose the narrowest type that will fit the required values
# NOTE(review): the "sub choosetype {" opener and the branch selecting
# between the non-negative and signed cases (presumably on $min >= 0)
# are not visible in this chunk.
my ($min, $max) = @_;
# Non-negative ranges.  Plain char is only trusted up to 127 since
# char may be signed on some platforms.
return "char" if $max <= 127;
return "unsigned char" if $max <= 255;
# NOTE(review): "<" rather than "<=" excludes the exact boundary
# values 32767/65535, promoting them to a wider type - conservative,
# but confirm it is intentional.
return "int16_t" if $max < 32767;
return "uint16_t" if $max < 65535;
# Ranges including negative values; -127/-32767 avoid assuming the
# extra two's-complement value is available.
return "char" if $min >= -127 && $max <= 127;
return "int16_t" if $min >= -32767 && $max <= 32767;
# The per-code-point record type.  Pointer fields are NULL or shared
# where possible; integer fields use the narrowest type that fits.
out("struct unidata {\n",
    # decomposition (canonical or compatibility;
    # unicode_compatibility_decomposition distinguishes) or NULL
    " const uint32_t *decomp;\n",

    # case-folded string or NULL
    " const uint32_t *casefold;\n",

    # composed characters that start with this code point.  This only
    # includes primary composites, i.e. the decomposition mapping is
    # canonical and this code point is not in the exclusion table.
    " const uint32_t *composed;\n",

    # Simple case mappings are currently disabled (commented out):
#    " ".choosetype($minud, $maxud)." upper_offset;\n",
#    " ".choosetype($minld, $maxld)." lower_offset;\n",

    # canonical combining class
    " ".choosetype(0, $maxccc)." ccc;\n",
    " char general_category;\n",

    # see unicode_flags enum
    # NOTE(review): the flags field declaration itself is on a line
    # not visible in this chunk.
    " char grapheme_break;\n",
    " char word_break;\n",
    " char sentence_break;\n",

# decomp and casefold do have non-BMP characters, so we
# can't use a simple 16-bit table.  We could use UTF-8 or UTF-16
# though, saving a bit of space (probably not that much...) at the
# cost of marginally reduced performance and additional complexity
out("extern const struct unidata *const unidata[];\n");

# Table used to validate UTF-8 byte sequences.
# NOTE(review): additional field declarations for this struct are on
# lines not visible in this chunk.
out("extern const struct unicode_utf8_row {\n",
    " uint8_t min2, max2;\n",
    "} unicode_utf8_valid[];\n");

# Constants the lookup code needs to navigate the split table.
out("#define UNICODE_NCHARS ", ($max + 1), "\n");
out("#define UNICODE_MODULUS $modulus\n");
out("#define UNICODE_BREAK_START $break_start\n");
out("#define UNICODE_BREAK_END $break_end\n");
out("#define UNICODE_BREAK_TOP $break_top\n");

# Close explicitly so buffered write errors are reported.
close STDOUT or die "unidata.h: $!\n";
# Generate the implementation file.
print STDERR "Generating unidata.c...\n";
open(STDOUT, ">unidata.c") or die "unidata.c: $!\n";

out("/* Automatically generated file, see scripts/make-unidata */\n",
    "#include <config.h>\n",
    "#include \"types.h\"\n",
    "#include \"unidata.h\"\n");

# Short aliases to keep .c file small
# NOTE(review): the sort-keys arguments closing these map() calls are
# on lines not visible in this chunk.
out(map(sprintf("#define %s unicode_General_Category_%s\n", $_, $_),
out(map(sprintf("#define GB%s unicode_Grapheme_Break_%s\n", $_, $_),
out(map(sprintf("#define WB%s unicode_Word_Break_%s\n", $_, $_),
out(map(sprintf("#define SB%s unicode_Sentence_Break_%s\n", $_, $_),
out("#define NBC unicode_normalize_before_casefold\n");
out("#define CD unicode_compatibility_decomposition\n");

# Names for *_Break properties
# Emitted in sorted-key order, matching the enum numbering above.
out("const char *const unicode_Grapheme_Break_names[] = {\n",
    map(" \"$_\"", sort keys %gbreak)),
out("const char *const unicode_Word_Break_names[] = {\n",
    map(" \"$_\"", sort keys %wbreak)),
out("const char *const unicode_Sentence_Break_names[] = {\n",
    map(" \"$_\"", sort keys %sbreak)),
# Start the shared uint32_t array declarations.
# NOTE(review): presumably a single declaration list continued by
# dedupe() below - the surrounding lines are not visible here.
out("static const uint32_t ");
# dedupe() fragment: emit a ddNNN[] array for the given values,
# re-using a previously emitted identical array, and return its name.
# NOTE(review): the "sub dedupe {" opener, the %ddnums/$ddnum
# declarations and the else branch (which counts $ddsaved) are not
# visible in this chunk.
my $s = join(",", @_);
if(!exists $ddnums{$s}) {
    # First time we've seen this content: emit under a fresh name.
    out("dd$ddnum\[]={$s}");
    $ddnums{$s} = $ddnum++;
# Return the (possibly shared) C identifier.
return "dd$ddnums{$s}";
# Generate the decomposition mapping tables.  We look out for duplicates
# in order to save space and report this as decompsaved at the end.  In
# Unicode 5.0.0 this saves 1795 entries, which is at least 14Kbytes.
print STDERR "> decomposition mappings\n";
for(my $c = 0; $c <= $max; ++$c) {
    if(exists $data{$c} && exists $data{$c}->{decomp}) {
	# dedupe() returns the C identifier of a shared array; the
	# trailing 0 terminates the emitted list.
	$data{$c}->{decompsym} = dedupe(@{$data{$c}->{decomp}}, 0);
print STDERR "> composition mappings\n";
# First we must generate, for each code point, the list of primary
# composites whose canonical decomposition starts with it.
for(my $c = 0; $c <= $max; ++$c) {
    if(exists $data{$c}
       && exists $data{$c}->{decomp}
       && !exists $data{$c}->{compat}
       && !$data{$c}->{Full_Composition_Exclusion}) {
	# $c has a non-excluded canonical decomposition, i.e. it is
	# a primary composite.  Find the first code point of the decomposition
	my $first = ${$data{$c}->{decomp}}[0];
	if(!exists $data{$first}->{compose}) {
	    # Bug fix: record the composite $c, not $first.  The
	    # "compose" list is defined (see the unidata.h comment)
	    # as the composed characters that START with this code
	    # point; storing $first made every list degenerate to
	    # copies of the starter itself.
	    $data{$first}->{compose} = [$c];
	} else {
	    push(@{$data{$first}->{compose}}, $c);
	}
    }
}
# Emit the (de-duplicated, 0-terminated) composition arrays.
for(my $c = 0; $c <= $max; ++$c) {
    if(exists $data{$c} && exists $data{$c}->{compose}) {
	$data{$c}->{compsym} = dedupe(@{$data{$c}->{compose}}, 0);
# ...and the case folding table.  Again we compress equal entries to save
# space.  In Unicode 5.0.0 this saves 51 entries or at least 408 bytes.
# This doesn't seem as worthwhile as the decomposition mapping saving above.
print STDERR "> case-fold mappings\n";
for(my $c = 0; $c <= $max; ++$c) {
    if(exists $data{$c} && exists $data{$c}->{casefold}) {
	# casefold is still the raw hex string from CaseFolding.txt;
	# convert it to numeric code points here.
	# NOTE(review): the terminating argument/closing parens of
	# this call are on lines not visible in this chunk.
	$data{$c}->{cfsym} = dedupe(map(hex($_), split(/\s+/,
						       $data{$c}->{casefold})),
# End of de-dupable arrays
# Visit all the $modulus-character blocks in turn and generate the
# required subtables.  As above we spot duplicates to save space.  In
# Unicode 5.0.0 with $modulus=128 and current table data this saves
# 1372 subtables or at least three and a half megabytes on 32-bit
print STDERR "> subtables\n";
my %subtable = ();		# subtable content -> subtable number
my %subtableno = ();		# base -> subtable number
my $subtablecounter = 0;	# counter for subtable numbers
my $subtablessaved = 0;		# number of tables saved
for(my $base = 0; $base <= $max; $base += $modulus) {
    # Skip the two regions deliberately omitted from the table.
    next if $base >= $break_start && $base < $break_end;
    next if $base >= $break_top;
    # NOTE(review): the construction of @t (one struct initializer
    # per code point) is only partially visible in this chunk.
    for(my $c = $base; $c < $base + $modulus; ++$c) {
	# Symbols for the shared arrays, or literal NULL ("0").
	my $decompsym = ($data{$c}->{decompsym} or "0");
	my $cfsym = ($data{$c}->{cfsym} or "0");
	my $compsym = ($data{$c}->{compsym} or "0");
	if($data{$c}->{ypogegrammeni}) {
	if($data{$c}->{compat}) {
	my $flags = @flags ? join("|", @flags) : 0;
    # Identical subtables are emitted once and shared by number.
    my $t = join(",\n", @t);
    if(!exists $subtable{$t}) {
	out(sprintf("/* %04X-%04X */\n", $base, $base + $modulus - 1));
	out("static const struct unidata st$subtablecounter\[] = {\n",
	$subtable{$t} = $subtablecounter++;
    $subtableno{$base} = $subtable{$t};
print STDERR "> main table\n";
# The top-level table maps each $modulus-sized block to its (possibly
# shared) subtable.  The omitted regions are simply not emitted; the
# lookup code must compensate using the UNICODE_BREAK_* constants.
out("const struct unidata *const unidata[]={\n");
for(my $base = 0; $base <= $max; $base += $modulus) {
    next if $base >= $break_start && $base < $break_end;
    next if $base >= $break_top;
    #out("st$subtableno{$base} /* ".sprintf("%04x", $base)." */,\n");
    out("st$subtableno{$base},\n");
print STDERR "> UTF-8 table\n";
# Emit the UTF-8 validation table: for every possible first byte, the
# total sequence length (0 = invalid lead byte) and the allowed range
# for the second byte.  Driven by one (lo, hi, entry) row per byte
# range instead of a separate loop per range.
out("const struct unicode_utf8_row unicode_utf8_valid[] = {\n");
for my $row ([0x00, 0x7F, "1, 0, 0"],	     # single-byte (ASCII)
	     [0x80, 0xC1, "0, 0, 0"],	     # continuation/overlong leads
	     [0xC2, 0xDF, "2, 0x80, 0xBF"],
	     [0xE0, 0xE0, "3, 0xA0, 0xBF"],  # tighter min2: no overlongs
	     [0xE1, 0xEC, "3, 0x80, 0xBF"],
	     [0xED, 0xED, "3, 0x80, 0x9F"],  # tighter max2: no surrogates
	     [0xEE, 0xEF, "3, 0x80, 0xBF"],
	     [0xF0, 0xF0, "4, 0x90, 0xBF"],  # tighter min2: no overlongs
	     [0xF1, 0xF3, "4, 0x80, 0xBF"],
	     [0xF4, 0xF4, "4, 0x80, 0x8F"],  # tighter max2: <= U+10FFFF
	     [0xF5, 0xFF, "0, 0, 0"]) {	     # never-valid lead bytes
    my ($lo, $hi, $entry) = @$row;
    for(my $c = $lo; $c <= $hi; ++$c) {
	out(" { $entry }, /* $c */\n");
    }
}
# Close unidata.c explicitly so buffered write errors are reported
# rather than silently lost.
close(STDOUT) or die "unidata.c: $!\n";

# Emit a short summary of the generated tables to stderr.
print STDERR "Done.\n\n";
printf STDERR "modulus=%d\n", $modulus;
printf STDERR "max=%04X\n", $max;
printf STDERR "subtables=%s, subtablessaved=%s\n",
    $subtablecounter, $subtablessaved;
printf STDERR "ddsaved=%s\n", $ddsaved;