#! /usr/bin/perl -w
#
-# Generate a two-level table describing (some of) the fields of UnicodeData.txt
+# This file is part of DisOrder.
+# Copyright (C) 2007 Richard Kettlewell
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+#
+#
+# Generate Unicode support tables
+#
+# This script will download data from unicode.org if the required files
+# aren't in the current directory.
+#
+# After modifying this script you should run:
+# make -C lib rebuild-unicode check
+#
+# Things not supported yet:
+# - SpecialCasing.txt data for case mapping
+# - Title case offsets
+# - Some kind of hinting for composition
+# - Word boundary support
+# - ...
+#
+# NB the generated files DO NOT offer a stable ABI and so are not immediately
+# suitable for use in a general-purpose library. Things that would need to
+# be done:
+# - Hide unidata.h from applications; it will never be ABI- or even API-stable.
+# - Stabilize General_Category values
+# - Extend the unicode.h API to general utility rather than just what
+# DisOrder needs.
+# - ...
+#
use strict;
+use File::Basename;
# Print to the currently selected output handle, dying on error.
# Used throughout so that a full disk or closed pipe is never
# silently ignored while generating unidata.h/unidata.c.
sub out {
    print @_ or die "$!\n";
}
+# Size of a subtable
+#
+# This can be varied to trade off the number of subtables against their size.
+our $modulus = 128;
+
my %cats = (); # known general categories
my %data = (); # mapping of codepoints to information
-my %comp = (); #
my $max = 0; # maximum codepoint
+my $maxccc = 0; # maximum combining class
+my $maxud = 0;
+my $minud = 0; # max/min upper case offset
+my $maxld = 0;
+my $minld = 0; # max/min lower case offset
+
+# Make sure we have our desired input files. We explicitly specify a
+# Unicode standard version to make sure that a given version of DisOrder
+# supports a given version of Unicode.
+sub input {
+ my $path = shift;
+ my $lpath = basename($path);
+ if(!-e $lpath) {
+ system("wget http://www.unicode.org/Public/5.0.0/ucd/$path");
+ chmod(0444, $lpath) or die "$lpath: $!\n";
+ }
+ open(STDIN, "<$lpath") or die "$lpath: $!\n";
+ print STDERR "Reading $lpath...\n";
+}
+
+# Read the main data file
+input("UnicodeData.txt");
while(<>) {
my @f = split(/;/, $_);
my $c = hex($f[0]); # codepoint
next if $c >= 0xE0000; # ignore various high-numbered stuff
+ # TODO justify this exclusion!
my $name = $f[1];
- my $gc = $f[2]; # general category
- my $ccc = $f[3]; # canonical combining class
- my $sum = hex($f[12]) || $c; # simple upper case mapping
- my $slm = hex($f[13]) || $c; # simple lower case mapping
+ my $gc = $f[2]; # General_Category
+  # Various GCs we don't expect to see in UnicodeData.txt
+ $cats{$gc} = 1; # always record all GCs
+ next if $name =~ /(first|last)>/i; # ignore placeholders
+ die "unexpected Cn" if $gc eq 'Cn';
+  die "unexpected Co" if $gc eq 'Co';
+ die "unexpected Cs" if $gc eq 'Cs';
+ my $ccc = $f[3]; # Canonical_Combining_Class
+ my $dm = $f[5]; # Decomposition_Type + Decomposition_Mapping
+ my $sum = hex($f[12]) || $c; # Simple_Uppercase_Mapping
+ my $slm = hex($f[13]) || $c; # Simple_Lowercase_Mapping
# recalculate the upper/lower case mappings as offsets
my $ud = $sum - $c;
my $ld = $slm - $c;
+ # update bounds on various values
+ $maxccc = $ccc if $ccc > $maxccc; # assumed never to be -ve
+ $minud = $ud if $ud < $minud;
+ $maxud = $ud if $ud > $maxud;
+ $minld = $ld if $ld < $minld;
+ $maxld = $ld if $ld > $maxld;
$data{$c} = {
"gc" => $gc,
"ccc" => $ccc,
"ud" => $ud,
- "ld" => $ld
+ "ld" => $ld,
};
+ if($dm ne '') {
+ if($dm !~ /</) {
+ # This is a canonical decomposition
+ $data{$c}->{canon} = $dm;
+ $data{$c}->{compat} = $dm;
+ } else {
+ # This is only a compatibility decomposition
+ $dm =~ s/^<.*>\s*//;
+ $data{$c}->{compat} = $dm;
+ }
+ }
$cats{$gc} = 1;
$max = $c if $c > $max;
}
-$max += 255 - ($max % 256); # round up
+sub read_prop_with_ranges {
+ my $path = shift;
+ my $propkey = shift;
+ input($path);
+ while(<>) {
+ chomp;
+ s/\s*\#.*//;
+ next if $_ eq '';
+ my ($range, $propval) = split(/\s*;\s*/, $_);
+ if($range =~ /(.*)\.\.(.*)/) {
+ for my $c (hex($1) .. hex($2)) {
+ if(exists $data{$c}) {
+ $data{$c}->{$propkey} = $propval;
+ }
+ }
+ } else {
+ my $c = hex($range);
+ if(exists $data{$c}) {
+ $data{$c}->{$propkey} = $propval;
+ }
+ }
+ }
+}
+
+# Grapheme_Break etc
+# NB we do this BEFORE filling in blanks so that the Hangul characters
+# don't get filled in; we can compute their properties mechanically.
+read_prop_with_ranges("auxiliary/GraphemeBreakProperty.txt", "gbreak");
+read_prop_with_ranges("auxiliary/WordBreakProperty.txt", "wbreak");
+read_prop_with_ranges("auxiliary/SentenceBreakProperty.txt", "sbreak");
+
+# Compute the full list and fill in the Extend category properly
+my %gbreak = ();
+my %wbreak = ();
+my %sbreak = ();
+for my $c (keys %data) {
+ if(!exists $data{$c}->{gbreak}) {
+ $data{$c}->{gbreak} = 'Other';
+ }
+ $gbreak{$data{$c}->{gbreak}} = 1;
+
+ if(!exists $data{$c}->{wbreak}) {
+ if($data{$c}->{gbreak} eq 'Extend') {
+ $data{$c}->{wbreak} = 'Extend';
+ } else {
+ $data{$c}->{wbreak} = 'Other';
+ }
+ }
+ $wbreak{$data{$c}->{wbreak}} = 1;
+
+ if(!exists $data{$c}->{sbreak}) {
+ if($data{$c}->{gbreak} eq 'Extend') {
+ $data{$c}->{sbreak} = 'Extend';
+ } else {
+ $data{$c}->{sbreak} = 'Other';
+ }
+ }
+ $sbreak{$data{$c}->{sbreak}} = 1;
+}
+
+# Round up the maximum value to a whole number of subtables
+$max += ($modulus - 1) - ($max % $modulus);
+
+# Surrogates
+my $Cs = {
+ "gc" => "Cs", # UTF-16 surrogate
+ "ccc" => 0,
+ "ud" => 0,
+ "ld" => 0
+};
+for(my $c = 0xD800; $c <= 0xDFFF; ++$c) {
+ $data{$c} = $Cs;
+}
+
+# Private use characters
+# We only fill in values below $max; utf32__unidata() handles the rest
+my $Co = {
+ "gc" => "Co",
+ "ccc" => 0,
+ "ud" => 0,
+ "ld" => 0
+};
+for(my $c = 0xE000; $c <= 0xF8FF && $c <= $max; ++$c) {
+ $data{$c} = $Co;
+}
+for(my $c = 0xF0000; $c <= 0xFFFFD && $c <= $max; ++$c) {
+ $data{$c} = $Co;
+}
+for(my $c = 0x100000; $c <= 0x10FFFD && $c <= $max; ++$c) {
+ $data{$c} = $Co;
+}
-# Make sure there are no gaps
+# Anything left is not assigned
+my $Cn = {
+ "gc" => "Cn", # not assigned
+ "ccc" => 0,
+ "ud" => 0,
+ "ld" => 0
+};
for(my $c = 0; $c <= $max; ++$c) {
if(!exists $data{$c}) {
- $data{$c} = {
- "gc" => "Cn", # not assigned
- "ccc" => 0,
- "ud" => 0,
- "ld" => 0
- };
+ $data{$c} = $Cn;
+ }
+ if(!exists $data{$c}->{wbreak}) {
+ $data{$c}->{wbreak} = 'Other';
+ }
+ if(!exists $data{$c}->{gbreak}) {
+ $data{$c}->{gbreak} = 'Other';
+ }
+ if(!exists $data{$c}->{sbreak}) {
+ $data{$c}->{sbreak} = 'Other';
}
}
$cats{'Cn'} = 1;
+# Read the casefolding data too
+input("CaseFolding.txt");
+while(<>) {
+ chomp;
+ next if /^\#/ or $_ eq '';
+ my @f = split(/\s*;\s*/, $_);
+ # Full case folding means use status C and F.
+ # We discard status T, Turkish users may wish to change this.
+ if($f[1] eq 'C' or $f[1] eq 'F') {
+ my $c = hex($f[0]);
+ $data{$c}->{casefold} = $f[2];
+    # We are particularly interested in combining characters that
+    # case-fold to non-combining characters, or characters that
+    # case-fold to sequences with combining characters in non-initial
+    # positions, as these require decomposition before case-folding
+ my @d = map(hex($_), split(/\s+/, $data{$c}->{casefold}));
+ if($data{$c}->{ccc} != 0) {
+ # This is a combining character
+ if($data{$d[0]}->{ccc} == 0) {
+ # The first character of its case-folded form is NOT
+ # a combining character. The field name is the example
+ # explicitly mentioned in the spec.
+ $data{$c}->{ypogegrammeni} = 1;
+ }
+ } else {
+ # This is a non-combining character; inspect the non-initial
+ # code points of the case-folded sequence
+ shift(@d);
+ if(grep($data{$_}->{ccc} != 0, @d)) {
+      # Some non-initial code point in the case-folded form is
+      # a combining character.
+ $data{$c}->{ypogegrammeni} = 1;
+ }
+ }
+ }
+}
+
+# Generate the header file
+print STDERR "Generating unidata.h...\n";
open(STDOUT, ">unidata.h") or die "unidata.h: $!\n";
-out("#ifndef UNIDATA_H\n",
+out("/* Automatically generated file, see scripts/make-unidata */\n",
+ "#ifndef UNIDATA_H\n",
"#define UNIDATA_H\n");
-out("enum unicode_gc_cat {\n",
+# TODO choose stable values for General_Category
+out("enum unicode_General_Category {\n",
+ join(",\n",
+ map(" unicode_General_Category_$_", sort keys %cats)), "\n};\n");
+
+out("enum unicode_Grapheme_Break {\n",
join(",\n",
- map(" unicode_gc_$_", sort keys %cats)), "\n};\n");
-
+ map(" unicode_Grapheme_Break_$_", sort keys %gbreak)),
+ "\n};\n");
+out("extern const char *const unicode_Grapheme_Break_names[];\n");
+
+out("enum unicode_Word_Break {\n",
+ join(",\n",
+ map(" unicode_Word_Break_$_", sort keys %wbreak)),
+ "\n};\n");
+out("extern const char *const unicode_Word_Break_names[];\n");
+
+out("enum unicode_Sentence_Break {\n",
+ join(",\n",
+ map(" unicode_Sentence_Break_$_", sort keys %sbreak)),
+ "\n};\n");
+out("extern const char *const unicode_Sentence_Break_names[];\n");
+
+out("enum unicode_flags {\n",
+ " unicode_normalize_before_casefold = 1\n",
+ "};\n",
+ "\n");
+
+# Choose the narrowest type that will fit the required values
+sub choosetype {
+ my ($min, $max) = @_;
+ if($min >= 0) {
+ return "char" if $max <= 127;
+ return "unsigned char" if $max <= 255;
+ return "int16_t" if $max < 32767;
+ return "uint16_t" if $max < 65535;
+ return "int32_t";
+ } else {
+ return "char" if $min >= -127 && $max <= 127;
+ return "int16_t" if $min >= -32767 && $max <= 32767;
+ return "int32_t";
+ }
+}
+
out("struct unidata {\n",
- " enum unicode_gc_cat gc;\n",
- " int ccc;\n",
- " int upper_offset;\n",
- " int lower_offset;\n",
+ " const uint32_t *compat;\n",
+ " const uint32_t *canon;\n",
+ " const uint32_t *casefold;\n",
+ " ".choosetype($minud, $maxud)." upper_offset;\n",
+ " ".choosetype($minld, $maxld)." lower_offset;\n",
+ " ".choosetype(0, $maxccc)." ccc;\n",
+ " char general_category;\n",
+ " uint8_t flags;\n",
+ " char grapheme_break;\n",
+ " char word_break;\n",
+ " char sentence_break;\n",
"};\n");
+# compat, canon and casefold do have non-BMP characters, so we
+# can't use a simple 16-bit table. We could use UTF-8 or UTF-16
+# though, saving a bit of space (probably not that much...) at the
+# cost of marginally reduced performance and additional complexity
out("extern const struct unidata *const unidata[];\n");
out("#define UNICODE_NCHARS ", ($max + 1), "\n");
+out("#define UNICODE_MODULUS $modulus\n");
out("#endif\n");
close STDOUT or die "unidata.h: $!\n";
+print STDERR "Generating unidata.c...\n";
open(STDOUT, ">unidata.c") or die "unidata.c: $!\n";
-out("#include \"unidata.h\"\n");
+out("/* Automatically generated file, see scripts/make-unidata */\n",
+ "#include <config.h>\n",
+ "#include \"types.h\"\n",
+ "#include \"unidata.h\"\n");
+
+# Short aliases to keep .c file small
+
+out(map(sprintf("#define %s unicode_General_Category_%s\n", $_, $_),
+ sort keys %cats));
+out(map(sprintf("#define GB%s unicode_Grapheme_Break_%s\n", $_, $_),
+ sort keys %gbreak));
+out(map(sprintf("#define WB%s unicode_Word_Break_%s\n", $_, $_),
+ sort keys %wbreak));
+out(map(sprintf("#define SB%s unicode_Sentence_Break_%s\n", $_, $_),
+ sort keys %sbreak));
+
+# Names for *_Break properties
+out("const char *const unicode_Grapheme_Break_names[] = {\n",
+ join(",\n",
+ map(" \"$_\"", sort keys %gbreak)),
+ "\n};\n");
+out("const char *const unicode_Word_Break_names[] = {\n",
+ join(",\n",
+ map(" \"$_\"", sort keys %wbreak)),
+ "\n};\n");
+out("const char *const unicode_Sentence_Break_names[] = {\n",
+ join(",\n",
+ map(" \"$_\"", sort keys %sbreak)),
+ "\n};\n");
+
+# Generate the decomposition mapping tables. We look out for duplicates
+# in order to save space and report this as decompsaved at the end. In
+# Unicode 5.0.0 this saves 1795 entries, which is at least 14Kbytes.
+my $decompnum = 0;
+my %decompnums = ();
+my $decompsaved = 0;
+out("static const uint32_t ");
+for(my $c = 0; $c <= $max; ++$c) {
+ # If canon is set then compat will be too and will be identical.
+ # If compat is set the canon might be clear. So we use the
+ # compat version and fix up the symbols after.
+ if(exists $data{$c} && exists $data{$c}->{compat}) {
+ my $s = join(",",
+ (map(hex($_), split(/\s+/, $data{$c}->{compat})), 0));
+ if(!exists $decompnums{$s}) {
+ out(",\n") if $decompnum != 0;
+ out("cd$decompnum\[]={$s}");
+ $decompnums{$s} = $decompnum++;
+ } else {
+ ++$decompsaved;
+ }
+ $data{$c}->{compatsym} = "cd$decompnums{$s}";
+ if(exists $data{$c}->{canon}) {
+ $data{$c}->{canonsym} = "cd$decompnums{$s}";
+ }
+ }
+}
+out(";\n");
+
+# ...and the case folding table. Again we compress equal entries to save
+# space. In Unicode 5.0.0 this saves 51 entries or at least 408 bytes.
+# This doesn't seem as worthwhile as the decomposition mapping saving above.
+my $cfnum = 0;
+my %cfnums = ();
+my $cfsaved = 0;
+out("static const uint32_t ");
+for(my $c = 0; $c <= $max; ++$c) {
+ if(exists $data{$c} && exists $data{$c}->{casefold}) {
+ my $s = join(",",
+ (map(hex($_), split(/\s+/, $data{$c}->{casefold})), 0));
+ if(!exists $cfnums{$s}) {
+ out(",\n") if $cfnum != 0;
+ out("cf$cfnum\[]={$s}");
+ $cfnums{$s} = $cfnum++;
+ } else {
+ ++$cfsaved;
+ }
+ $data{$c}->{cfsym} = "cf$cfnums{$s}";
+ }
+}
+out(";\n");
+
+# Visit all the $modulus-character blocks in turn and generate the
+# required subtables. As above we spot duplicates to save space. In
+# Unicode 5.0.0 with $modulus=128 and current table data this saves
+# 1372 subtables or at least three and a half megabytes on 32-bit
+# platforms.
-# Visit all the 256-character blocks in turn and generate the required
-# subtables
my %subtable = (); # base->subtable number
my %subtableno = (); # subtable number -> content
my $subtablecounter = 0; # counter for subtable numbers
-for(my $base = 0; $base <= $max; $base += 256) {
+my $subtablessaved = 0; # number of tables saved
+for(my $base = 0; $base <= $max; $base += $modulus) {
my @t;
- for(my $c = $base; $c <= $base + 255; ++$c) {
+ for(my $c = $base; $c < $base + $modulus; ++$c) {
my $d = $data{$c};
- push(@t,
- " { unicode_gc_$d->{gc}, $d->{ccc}, $d->{ud}, $d->{ld} }");
+ my $canonsym = ($data{$c}->{canonsym} or "0");
+ my $compatsym = ($data{$c}->{compatsym} or "0");
+ my $cfsym = ($data{$c}->{cfsym} or "0");
+ my @flags = ();
+ if($data{$c}->{ypogegrammeni}) {
+ push(@flags, "unicode_normalize_before_casefold");
+ }
+ my $flags = @flags ? join("|", @flags) : 0;
+ push(@t, "{".
+ join(",",
+ $compatsym,
+ $canonsym,
+ $cfsym,
+ $d->{ud},
+ $d->{ld},
+ $d->{ccc},
+ $d->{gc},
+ $flags,
+ "GB$d->{gbreak}",
+ "WB$d->{wbreak}",
+ "SB$d->{sbreak}",
+ )."}");
}
my $t = join(",\n", @t);
if(!exists $subtable{$t}) {
- out("static const struct unidata subtable$subtablecounter\[] = {\n",
+ out("static const struct unidata st$subtablecounter\[] = {\n",
"$t\n",
"};\n");
$subtable{$t} = $subtablecounter++;
+ } else {
+ ++$subtablessaved;
}
$subtableno{$base} = $subtable{$t};
}
-out("const struct unidata *const unidata[] = {\n");
-for(my $base = 0; $base <= $max; $base += 256) {
- out(" subtable$subtableno{$base},\n");
+out("const struct unidata *const unidata[]={\n");
+for(my $base = 0; $base <= $max; $base += $modulus) {
+ #out("st$subtableno{$base} /* ".sprintf("%04x", $base)." */,\n");
+ out("st$subtableno{$base},\n");
}
out("};\n");
close STDOUT or die "unidata.c: $!\n";
-print STDERR "max=$max, subtables=$subtablecounter\n";
+printf STDERR "max=%04X\n", $max;
+print STDERR "subtables=$subtablecounter, subtablessaved=$subtablessaved\n";
+print STDERR "decompsaved=$decompsaved cfsaved=$cfsaved\n";