3 # This file is part of DisOrder.
4 # Copyright (C) 2007 Richard Kettlewell
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 # Generate Unicode support tables
24 # This script will download data from unicode.org if the required files
25 # aren't in the current directory.
27 # After modifying this script you should run:
28 # make -C lib rebuild-unicode check
30 # Things not supported yet:
31 # - SpecialCasing.txt data for case mapping
32 # - Title case offsets
33 # - Some kind of hinting for composition
34 # - Word boundary support
37 # NB the generated files DO NOT offer a stable ABI and so are not immediately
38 # suitable for use in a general-purpose library. Things that would need to
40 # - Hide unidata.h from applications; it will never be ABI- or even API-stable.
41 # - Stabilized General_Category values
42 # - Extend the unicode.h API to general utility rather than just what
# NOTE(review): lines are missing from this excerpt; the statements below
# are the bodies of small helper subs plus file-scope state declarations.
# out(): write to whatever file STDOUT is currently reopened onto; abort on error.
50 print @_ or die "$!\n";
# Build a deterministic string key from a hash ref (values joined in
# sorted-key order) — presumably used to detect duplicate records; confirm
# against the full source.
57 return join("-", map($d->{$_}, sort keys %$d));
62 # This can be varied to trade off the number of subtables against their size.
# Accumulators filled in while parsing the Unicode Character Database files.
65 my %cats = (); # known general categories
66 my %data = (); # mapping of codepoints to information
67 my $max = 0; # maximum codepoint
68 my $maxccc = 0; # maximum combining class
70 my $minud = 0; # max/min upper case offset
72 my $minld = 0; # max/min lower case offset
74 # Make sure we have our desired input files. We explicitly specify a
75 # Unicode standard version to make sure that a given version of DisOrder
76 # supports a given version of Unicode.
# input(): download $path from the Unicode 5.0.0 UCD if not already present,
# make the local copy read-only, then reopen STDIN onto it so callers can
# read the file with plain <STDIN>/$_ loops.
79 my $lpath = basename($path);
# NOTE(review): the return status of system() is not checked, and $path is
# interpolated into a shell command. Tolerable for a trusted build-time
# script, but worth verifying in the full source ($lpath existence is only
# implicitly checked by the chmod below).
81 system("wget http://www.unicode.org/Public/5.0.0/ucd/$path");
82 chmod(0444, $lpath) or die "$lpath: $!\n";
# NOTE(review): two-argument open; three-argument open(STDIN, '<', $lpath)
# would be the safer modern idiom.
84 open(STDIN, "<$lpath") or die "$lpath: $!\n";
88 # Read the main data file
89 input("UnicodeData.txt");
# NOTE(review): the enclosing while-loop header and several lines (including
# the definitions of $name, $ud and $ld) are missing from this excerpt.
# UnicodeData.txt is one semicolon-separated record per line.
91 my @f = split(/;/, $_);
92 my $c = hex($f[0]); # codepoint
93 next if $c >= 0xE0000; # ignore various high-numbered stuff
94 # TODO justify this exclusion!
96 my $gc = $f[2]; # General_Category
97 $cats{$gc} = 1; # always record all GCs
98 next if $name =~ /(first|last)>/i; # ignore placeholders
99 my $ccc = $f[3]; # Canonical_Combining_Class
100 my $dm = $f[5]; # Decomposition_Type + Decomposition_Mapping
# An empty field hexes to 0, so "|| $c" makes a missing mapping mean
# "maps to itself".
101 my $sum = hex($f[12]) || $c; # Simple_Uppercase_Mapping
102 my $slm = hex($f[13]) || $c; # Simple_Lowercase_Mapping
103 # recalculate the upper/lower case mappings as offsets
106 # update bounds on various values
107 $maxccc = $ccc if $ccc > $maxccc; # assumed never to be -ve
# $ud/$ld are the upper/lower case offsets computed in the missing lines
# above; track their extremes so choosetype() can pick a narrow field type.
108 $minud = $ud if $ud < $minud;
109 $maxud = $ud if $ud > $maxud;
110 $minld = $ld if $ld < $minld;
111 $maxld = $ld if $ld > $maxld;
120 # This is a canonical decomposition
# A canonical decomposition is by definition also a compatibility one, so
# record it under both keys.
121 $data{$c}->{canon} = $dm;
122 $data{$c}->{compat} = $dm;
124 # This is only a compatibility decomposition
126 $data{$c}->{compat} = $dm;
130 $max = $c if $c > $max;
# Read a UCD property file whose lines have the form "RANGE ; VALUE",
# where RANGE is either a single codepoint or "FIRST..LAST" (hex), and
# store VALUE into $data{$c}->{$propkey} for every codepoint already known.
# NOTE(review): the parameter unpacking (including $propkey) and the input()
# call are in lines missing from this excerpt.
133 sub read_prop_with_ranges {
141 my ($range, $propval) = split(/\s*;\s*/, $_);
142 if($range =~ /(.*)\.\.(.*)/) {
# Range form: apply the value to every codepoint in the inclusive range,
# but only for codepoints that already exist in %data.
143 for my $c (hex($1) .. hex($2)) {
144 if(exists $data{$c}) {
145 $data{$c}->{$propkey} = $propval;
# Single-codepoint form (the hex($range) assignment is in a missing line).
150 if(exists $data{$c}) {
151 $data{$c}->{$propkey} = $propval;
# Load the three *_Break property files.  This must happen BEFORE the
# blank-filling pass so that the Hangul characters are left absent here;
# their break properties can be computed mechanically later.
for my $spec (['auxiliary/GraphemeBreakProperty.txt', 'gbreak'],
              ['auxiliary/WordBreakProperty.txt',     'wbreak'],
              ['auxiliary/SentenceBreakProperty.txt', 'sbreak']) {
    read_prop_with_ranges(@$spec);
}
164 # Compute the full list and fill in the Extend category properly
# NOTE(review): the 'else' lines and closing braces of the conditionals
# below are missing from this excerpt.  The logic visible here: default
# gbreak to 'Other'; default wbreak/sbreak to 'Extend' when gbreak is
# 'Extend', otherwise (in the missing else branches) to 'Other'.  The
# %gbreak/%wbreak/%sbreak hashes collect the set of property values seen,
# later used to emit the C enums and name arrays.
168 for my $c (keys %data) {
169 if(!exists $data{$c}->{gbreak}) {
170 $data{$c}->{gbreak} = 'Other';
172 $gbreak{$data{$c}->{gbreak}} = 1;
174 if(!exists $data{$c}->{wbreak}) {
175 if($data{$c}->{gbreak} eq 'Extend') {
176 $data{$c}->{wbreak} = 'Extend';
178 $data{$c}->{wbreak} = 'Other';
181 $wbreak{$data{$c}->{wbreak}} = 1;
183 if(!exists $data{$c}->{sbreak}) {
184 if($data{$c}->{gbreak} eq 'Extend') {
185 $data{$c}->{sbreak} = 'Extend';
187 $data{$c}->{sbreak} = 'Other';
190 $sbreak{$data{$c}->{sbreak}} = 1;
# Round $max up so the tables cover a whole number of $modulus-sized
# subtables, i.e. up to the next multiple of $modulus minus one.
$max = (int($max / $modulus) + 1) * $modulus - 1;
196 # Make sure there are no gaps
# Every codepoint from 0 to $max gets a record; unassigned codepoints get
# General_Category 'Cn' (the rest of the default record is in lines missing
# from this excerpt).
197 for(my $c = 0; $c <= $max; ++$c) {
198 if(!exists $data{$c}) {
200 "gc" => "Cn", # not assigned
212 # Read the casefolding data too
213 input("CaseFolding.txt");
# Skip comment lines and blanks; fields are semicolon-separated.
216 next if /^\#/ or $_ eq '';
217 my @f = split(/\s*;\s*/, $_);
218 # Full case folding means use status C and F.
219 # We discard status T, Turkish users may wish to change this.
220 if($f[1] eq 'C' or $f[1] eq 'F') {
# NOTE(review): the line defining $c (presumably hex($f[0])) is missing
# from this excerpt.  $f[2] is the space-separated folded codepoint list.
222 $data{$c}->{casefold} = $f[2];
223 # We are particularly interested in combining characters that
224 # case-fold to non-combining characters, or characters that
225 # case-fold to sequences with combining characters in non-initial
226 # positions, as these require decomposition before case-folding
227 my @d = map(hex($_), split(/\s+/, $data{$c}->{casefold}));
228 if($data{$c}->{ccc} != 0) {
229 # This is a combining character
230 if($data{$d[0]}->{ccc} == 0) {
231 # The first character of its case-folded form is NOT
232 # a combining character. The field name is the example
233 # explicitly mentioned in the spec.
234 $data{$c}->{ypogegrammeni} = 1;
237 # This is a non-combining character; inspect the non-initial
238 # code points of the case-folded sequence
# (The shift/slice dropping the initial code point from @d is in a
# missing line.)
240 if(grep($data{$_}->{ccc} != 0, @d)) {
241 # Some non-initial code point in the case-folded form IS a
242 # combining character, so this character needs the flag too.
243 $data{$c}->{ypogegrammeni} = 1;
249 # Generate the header file
# STDOUT is reopened onto unidata.h; all subsequent out() calls write there.
250 open(STDOUT, ">unidata.h") or die "unidata.h: $!\n";
252 out("/* Automatically generated file, see scripts/make-unidata */\n",
253 "#ifndef UNIDATA_H\n",
254 "#define UNIDATA_H\n");
256 # TODO choose stable values for General_Category
# Emit one enum per property, with enumerators derived from the sorted set
# of property values collected during parsing.  (The join(",\n", ...) calls
# wrapping these map()s are in lines missing from this excerpt.)
257 out("enum unicode_General_Category {\n",
259 map(" unicode_General_Category_$_", sort keys %cats)), "\n};\n");
261 out("enum unicode_Grapheme_Break {\n",
263 map(" unicode_Grapheme_Break_$_", sort keys %gbreak)),
265 out("extern const char *const unicode_Grapheme_Break_names[];\n");
267 out("enum unicode_Word_Break {\n",
269 map(" unicode_Word_Break_$_", sort keys %wbreak)),
271 out("extern const char *const unicode_Word_Break_names[];\n");
273 out("enum unicode_Sentence_Break {\n",
275 map(" unicode_Sentence_Break_$_", sort keys %sbreak)),
277 out("extern const char *const unicode_Sentence_Break_names[];\n");
# Bit flags stored in struct unidata's flags field.
279 out("enum unicode_flags {\n",
280 " unicode_normalize_before_casefold = 1\n",
284 # Choose the narrowest type that will fit the required values
# choosetype($min, $max): return a C type name wide enough for the range.
# NOTE(review): the 'sub choosetype {' line, the $min >= 0 branch split,
# and the wide-type fall-through are missing from this excerpt.
286 my ($min, $max) = @_;
# Unsigned/non-negative cases:
288 return "char" if $max <= 127;
289 return "unsigned char" if $max <= 255;
# NOTE(review): these bounds use strict '<' where the types can actually
# hold the bound itself (int16_t up to 32767, uint16_t up to 65535), unlike
# the '<=' used for char above.  Conservative but asymmetric — verify
# whether this is intentional in the full source.
290 return "int16_t" if $max < 32767;
291 return "uint16_t" if $max < 65535;
# Signed cases (negative $min):
294 return "char" if $min >= -127 && $max <= 127;
295 return "int16_t" if $min >= -32767 && $max <= 32767;
# Emit the per-codepoint record type; integer field widths are chosen from
# the actual value ranges measured while parsing.
300 out("struct unidata {\n",
301 " const uint32_t *compat;\n",
302 " const uint32_t *canon;\n",
303 " const uint32_t *casefold;\n",
304 " ".choosetype($minud, $maxud)." upper_offset;\n",
305 " ".choosetype($minld, $maxld)." lower_offset;\n",
306 " ".choosetype(0, $maxccc)." ccc;\n",
307 " char general_category;\n",
309 " char grapheme_break;\n",
310 " char word_break;\n",
311 " char sentence_break;\n",
313 # compat, canon and casefold do have non-BMP characters, so we
314 # can't use a simple 16-bit table. We could use UTF-8 or UTF-16
315 # though, saving a bit of space (probably not that much...) at the
316 # cost of marginally reduced performance and additional complexity
# Top-level lookup: an array of subtable pointers indexed by
# codepoint / UNICODE_MODULUS.
318 out("extern const struct unidata *const unidata[];\n");
320 out("#define UNICODE_NCHARS ", ($max + 1), "\n");
321 out("#define UNICODE_MODULUS $modulus\n");
325 close STDOUT or die "unidata.h: $!\n";
325 close STDOUT or die "unidata.h: $!\n";
327 open(STDOUT, ">unidata.c") or die "unidata.c: $!\n";
329 out("/* Automatically generated file, see scripts/make-unidata */\n",
330 "#include <config.h>\n",
331 "#include \"types.h\"\n",
332 "#include \"unidata.h\"\n");
334 # Short aliases to keep .c file small
336 out(map(sprintf("#define %s unicode_General_Category_%s\n", $_, $_),
338 out(map(sprintf("#define GB%s unicode_Grapheme_Break_%s\n", $_, $_),
340 out(map(sprintf("#define WB%s unicode_Word_Break_%s\n", $_, $_),
342 out(map(sprintf("#define SB%s unicode_Sentence_Break_%s\n", $_, $_),
345 # Names for *_Break properties
346 out("const char *const unicode_Grapheme_Break_names[] = {\n",
348 map(" \"$_\"", sort keys %gbreak)),
350 out("const char *const unicode_Word_Break_names[] = {\n",
352 map(" \"$_\"", sort keys %wbreak)),
354 out("const char *const unicode_Sentence_Break_names[] = {\n",
356 map(" \"$_\"", sort keys %sbreak)),
359 # Generate the decomposition mapping tables. We look out for duplicates
360 # in order to save space and report this as decompsaved at the end. In
361 # Unicode 5.0.0 this saves 1795 entries, which is at least 14Kbytes.
# Each distinct zero-terminated decomposition sequence is emitted once as
# cdN[]; %decompnums maps sequence string -> N so later codepoints with the
# same sequence reuse the symbol.  (The declarations of %decompnums,
# $decompnum and $decompsaved are in missing lines.)
365 out("static const uint32_t ");
366 for(my $c = 0; $c <= $max; ++$c) {
367 # If canon is set then compat will be too and will be identical.
368 # If compat is set the canon might be clear. So we use the
369 # compat version and fix up the symbols after.
370 if(exists $data{$c}->{compat}) {
372 (map(hex($_), split(/\s+/, $data{$c}->{compat})), 0));
373 if(!exists $decompnums{$s}) {
374 out(",\n") if $decompnum != 0;
375 out("cd$decompnum\[]={$s}");
376 $decompnums{$s} = $decompnum++;
380 $data{$c}->{compatsym} = "cd$decompnums{$s}";
381 if(exists $data{$c}->{canon}) {
382 $data{$c}->{canonsym} = "cd$decompnums{$s}";
388 # ...and the case folding table. Again we compress equal entries to save
389 # space. In Unicode 5.0.0 this saves 51 entries or at least 408 bytes.
390 # This doesn't seem as worthwhile as the decomposition mapping saving above.
# Same dedup scheme as above, with cfN[] symbols and %cfnums.
394 out("static const uint32_t ");
395 for(my $c = 0; $c <= $max; ++$c) {
396 if(exists $data{$c}->{casefold}) {
398 (map(hex($_), split(/\s+/, $data{$c}->{casefold})), 0));
399 if(!exists $cfnums{$s}) {
400 out(",\n") if $cfnum != 0;
401 out("cf$cfnum\[]={$s}");
402 $cfnums{$s} = $cfnum++;
406 $data{$c}->{cfsym} = "cf$cfnums{$s}";
411 # Visit all the $modulus-character blocks in turn and generate the
412 # required subtables. As above we spot duplicates to save space. In
413 # Unicode 5.0.0 with $modulus=128 and current table data this saves
414 # 1372 subtables or at least three and a half megabytes on 32-bit
# NOTE(review): the comments on these two hashes appear swapped — given the
# code below, %subtable maps subtable CONTENT -> number and %subtableno
# maps base codepoint -> subtable number.  Verify against the full source.
417 my %subtable = (); # base->subtable number
418 my %subtableno = (); # subtable number -> content
419 my $subtablecounter = 0; # counter for subtable numbers
420 my $subtablessaved = 0; # number of tables saved
421 for(my $base = 0; $base <= $max; $base += $modulus) {
# Build one struct-initializer line per codepoint in this block (the @t
# accumulation lines are missing from this excerpt).
423 for(my $c = $base; $c < $base + $modulus; ++$c) {
# Codepoints without a decomposition/casefold get null pointers.
425 my $canonsym = ($data{$c}->{canonsym} or "0");
426 my $compatsym = ($data{$c}->{compatsym} or "0");
427 my $cfsym = ($data{$c}->{cfsym} or "0");
429 if($data{$c}->{ypogegrammeni}) {
430 push(@flags, "unicode_normalize_before_casefold");
432 my $flags = @flags ? join("|", @flags) : 0;
# Deduplicate: only emit a new stN[] subtable for content not seen before.
448 my $t = join(",\n", @t);
449 if(!exists $subtable{$t}) {
450 out("static const struct unidata st$subtablecounter\[] = {\n",
453 $subtable{$t} = $subtablecounter++;
457 $subtableno{$base} = $subtable{$t};
# Top-level table: one subtable pointer per $modulus-codepoint block.
460 out("const struct unidata*const unidata[]={\n");
461 for(my $base = 0; $base <= $max; $base += $modulus) {
462 #out("st$subtableno{$base} /* ".sprintf("%04x", $base)." */,\n");
463 out("st$subtableno{$base},\n");
# Flush and close unidata.c — close is checked because buffered write
# errors only surface here.
467 close STDOUT or die "unidata.c: $!\n";
# Summary statistics for the human running the build (to STDERR, since
# STDOUT is reserved for the generated files).
469 print STDERR "max=$max, subtables=$subtablecounter, subtablessaved=$subtablessaved\n";
470 print STDERR "decompsaved=$decompsaved cfsaved=$cfsaved\n";