X-Git-Url: https://www.chiark.greenend.org.uk/ucgi/~mdw/git/disorder/blobdiff_plain/0b7052dac24e57edb180f07c0bd3479d397ebb74..7489d3af05a4394e9621e14a196a9f6c4f788a26:/scripts/make-unidata
diff --git a/scripts/make-unidata b/scripts/make-unidata
index b00eb0a..28532a9 100755
--- a/scripts/make-unidata
+++ b/scripts/make-unidata
@@ -3,20 +3,18 @@
# This file is part of DisOrder.
# Copyright (C) 2007 Richard Kettlewell
#
-# This program is free software; you can redistribute it and/or modify
+# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
-# USA
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#
# Generate Unicode support tables
@@ -31,7 +29,6 @@
# - SpecialCasing.txt data for case mapping
# - Title case offsets
# - Some kind of hinting for composition
-# - Word boundary support
# - ...
#
# NB the generated files DO NOT offer a stable ABI and so are not immediately
@@ -60,7 +57,24 @@ sub key {
# Size of a subtable
#
# This can be varied to trade off the number of subtables against their size.
-our $modulus = 128;
+# 16 gave the smallest results last time I checked (on a Mac with a 32-bit
+# build).
+our $modulus = 16;
+
+if(@ARGV) {
+ $modulus = shift;
+}
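
With this change the subtable size can be overridden from the command line when experimenting, e.g.:

    scripts/make-unidata 32
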
+
+# Where to break the table. There is a huge empty section of the Unicode
+# code space and we deal with this by simply leaving it out of the table.
+# This complicates the lookup function a little but should not affect
+# performance in the cases we care about.
+our $break_start = 0x30000;
+our $break_end = 0xE0000;
+
+# Similarly we simply omit the very top of the table and sort it out in the
+# lookup function.
+our $break_top = 0xE0200;
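
To make the two omitted regions concrete, here is a minimal C sketch of the lookup they imply, written against the UNICODE_* macros this script emits into unidata.h (see below). It is illustrative only: the library's real lookup function may differ, and the entry returned for omitted code points is an assumption.

    #include <stdint.h>
    #include "unidata.h"

    static const struct unidata *lookup_sketch(uint32_t c) {
      /* Code points in either omitted region get some default entry;
       * which one the real code uses is a guess here. */
      if((c >= UNICODE_BREAK_START && c < UNICODE_BREAK_END)
         || c >= UNICODE_BREAK_TOP)
        return &unidata[0][0];            /* assumed default */
      /* Above the hole, shift the index down by the hole's width */
      if(c >= UNICODE_BREAK_END)
        c -= UNICODE_BREAK_END - UNICODE_BREAK_START;
      return &unidata[c / UNICODE_MODULUS][c % UNICODE_MODULUS];
    }
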
my %cats = (); # known general categories
my %data = (); # mapping of codepoints to information
@@ -78,22 +92,39 @@ sub input {
my $path = shift;
my $lpath = basename($path);
if(!-e $lpath) {
- system("wget http://www.unicode.org/Public/5.0.0/ucd/$path");
+ system("wget http://www.unicode.org/Public/6.0.0/ucd/$path");
chmod(0444, $lpath) or die "$lpath: $!\n";
}
open(STDIN, "<$lpath") or die "$lpath: $!\n";
+ print STDERR "Reading $lpath...\n";
}
# Read the main data file
input("UnicodeData.txt");
+my ($start, $end);
+my $maxcompat = 0;
+my $maxcanon = 0;
+my $hangul_syllable_decomps = 0;
+my $hangul_choseong_decomps = 0;
while(<>) {
my @f = split(/;/, $_);
my $c = hex($f[0]); # codepoint
- next if $c >= 0xE0000; # ignore various high-numbered stuff
- # TODO justify this exclusion!
my $name = $f[1];
+ die "$f[0] $name is in the break\n"
+ if $c >= $break_start && $c < $break_end;
my $gc = $f[2]; # General_Category
+ # Various GCs we don't expect to see in UnicodeData.txt
+ $cats{$gc} = 1; # always record all GCs
+ if($name =~ /first>/i) {
+ $start = $c;
+ next;
+ } elsif($name =~ /last>/i) {
+ $end = $c;
+ } else {
+ $start = $end = $c;
+ }
+ die "unexpected Cn" if $gc eq 'Cn';
my $ccc = $f[3]; # Canonical_Combining_Class
my $dm = $f[5]; # Decomposition_Type + Decomposition_Mapping
my $sum = hex($f[12]) || $c; # Simple_Uppercase_Mapping
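
For reference, each UnicodeData.txt record is a semicolon-separated line; the script uses field 0 (code point), 1 (name), 2 (General_Category), 3 (Canonical_Combining_Class), 5 (decomposition) and 12/13 (the simple upper/lowercase mappings). An illustrative record (quoted from the UCD; the exact text can vary between versions):

    00E1;LATIN SMALL LETTER A WITH ACUTE;Ll;0;L;0061 0301;;;;N;LATIN SMALL LETTER A ACUTE;;00C1;;00C1
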
@@ -107,25 +138,42 @@ while(<>) {
$maxud = $ud if $ud > $maxud;
$minld = $ld if $ld < $minld;
$maxld = $ld if $ld > $maxld;
- $data{$c} = {
- "gc" => $gc,
- "ccc" => $ccc,
- "ud" => $ud,
- "ld" => $ld,
+ if($start != $end) {
+ printf STDERR "> range %04X-%04X is %s\n", $start, $end, $gc;
+ }
+ for($c = $start; $c <= $end; ++$c) {
+ my $d = {
+ "gc" => $gc,
+ "ccc" => $ccc,
+ "ud" => $ud,
+ "ld" => $ld,
};
- if($dm ne '') {
- if($dm !~ /</) {
- # This is a canonical decomposition
- $data{$c}->{canon} = $dm;
- $data{$c}->{compat} = $dm;
- } else {
- # This is only a compatibility decomposition
- $dm =~ s/^<.*>\s*//;
- $data{$c}->{compat} = $dm;
+ if($dm ne '') {
+ my $maxref;
+ if($dm =~ /</) {
+ # This is a compatibility decomposition
+ $dm =~ s/^<.*>\s*//;
+ $d->{compat} = 1;
+ $maxref = \$maxcompat;
+ } else {
+ $maxref = \$maxcanon;
+ }
+ $d->{decomp} = [map(hex($_), split(/\s+/, $dm))];
+ my $len = scalar @{$d->{decomp}};
+ $$maxref = $len if $len > $$maxref;
+ if(!$d->{compat}) {
+ if(${$d->{decomp}}[0] >= 0xAC00 && ${$d->{decomp}}[0] <= 0xD7A3) {
+ ++$hangul_syllable_decomps;
+ }
+ if(${$d->{decomp}}[0] >= 0x1100 && ${$d->{decomp}}[0] <= 0x115F) {
+ ++$hangul_choseong_decomps;
+ }
+ }
}
+ $data{$c} = $d;
}
$cats{$gc} = 1;
- $max = $c if $c > $max;
+ $max = $end if $end > $max;
}
sub read_prop_with_ranges {
@@ -139,55 +187,110 @@ sub read_prop_with_ranges {
my ($range, $propval) = split(/\s*;\s*/, $_);
if($range =~ /(.*)\.\.(.*)/) {
for my $c (hex($1) .. hex($2)) {
- if(exists $data{$c}) {
- $data{$c}->{$propkey} = $propval;
- }
+ $data{$c}->{$propkey} = $propval;
}
} else {
my $c = hex($range);
- if(exists $data{$c}) {
- $data{$c}->{$propkey} = $propval;
- }
+ $data{$c}->{$propkey} = $propval;
}
}
}
-# Grapheme_Break
-# NB we do this BEFORE filling in blanks so that the Hangul characters
-# don't get filled in; we can compute their properties mechanically.
+# Grapheme_Break etc
read_prop_with_ranges("auxiliary/GraphemeBreakProperty.txt", "gbreak");
-
-# Word_Break
-# Same remarks about Hangul as above. This one currently seems just too
-# complicated to do programmatically so we'll take a byte to store it.
read_prop_with_ranges("auxiliary/WordBreakProperty.txt", "wbreak");
+read_prop_with_ranges("auxiliary/SentenceBreakProperty.txt", "sbreak");
-# Make the list of Word_Break values
-my %wbpropvals = ();
+# Compute the full list and fill in the Extend category properly
+my %gbreak = ();
+my %wbreak = ();
+my %sbreak = ();
for my $c (keys %data) {
+ if(!exists $data{$c}->{gbreak}) {
+ $data{$c}->{gbreak} = 'Other';
+ }
+ $gbreak{$data{$c}->{gbreak}} = 1;
+
if(!exists $data{$c}->{wbreak}) {
- if(exists $data{$c}->{gbreak} && $data{$c}->{gbreak} eq 'Extend') {
+ if($data{$c}->{gbreak} eq 'Extend') {
$data{$c}->{wbreak} = 'Extend';
} else {
$data{$c}->{wbreak} = 'Other';
}
}
- $wbpropvals{$data{$c}->{wbreak}} = 1;
+ $wbreak{$data{$c}->{wbreak}} = 1;
+
+ if(!exists $data{$c}->{sbreak}) {
+ if($data{$c}->{gbreak} eq 'Extend') {
+ $data{$c}->{sbreak} = 'Extend';
+ } else {
+ $data{$c}->{sbreak} = 'Other';
+ }
+ }
+ $sbreak{$data{$c}->{sbreak}} = 1;
+}
+
+# Various derived properties
+input("DerivedNormalizationProps.txt");
+while(<>) {
+ chomp;
+ s/\s*\#.*//;
+ next if $_ eq '';
+ my @f = split(/\s*;\s*/, $_);
+ if(@f == 2) {
+ push(@f, 1);
+ }
+ my ($range, $propkey, $propval) = @f;
+ if($range =~ /(.*)\.\.(.*)/) {
+ for my $c (hex($1) .. hex($2)) {
+ $data{$c}->{$propkey} = $propval
+ }
+ } else {
+ my $c = hex($range);
+ $data{$c}->{$propkey} = $propval
+ }
}
# Round up the maximum value to a whole number of subtables
$max += ($modulus - 1) - ($max % $modulus);
-# Make sure there are no gaps
+# Private use characters
+# We only fill in values below $max; utf32__unidata() handles the rest.
+my $Co = {
+ "gc" => "Co",
+ "ccc" => 0,
+ "ud" => 0,
+ "ld" => 0
+};
+for(my $c = 0xE000; $c <= 0xF8FF && $c <= $max; ++$c) {
+ $data{$c} = $Co;
+}
+for(my $c = 0xF0000; $c <= 0xFFFFD && $c <= $max; ++$c) {
+ $data{$c} = $Co;
+}
+for(my $c = 0x100000; $c <= 0x10FFFD && $c <= $max; ++$c) {
+ $data{$c} = $Co;
+}
+
+# Anything left is not assigned
+my $Cn = {
+ "gc" => "Cn", # not assigned
+ "ccc" => 0,
+ "ud" => 0,
+ "ld" => 0
+};
for(my $c = 0; $c <= $max; ++$c) {
if(!exists $data{$c}) {
- $data{$c} = {
- "gc" => "Cn", # not assigned
- "ccc" => 0,
- "ud" => 0,
- "ld" => 0,
- "wbreak" => 'Other',
- };
+ $data{$c} = $Cn;
+ }
+ if(!exists $data{$c}->{wbreak}) {
+ $data{$c}->{wbreak} = 'Other';
+ }
+ if(!exists $data{$c}->{gbreak}) {
+ $data{$c}->{gbreak} = 'Other';
+ }
+ if(!exists $data{$c}->{sbreak}) {
+ $data{$c}->{sbreak} = 'Other';
}
}
$cats{'Cn'} = 1;
@@ -230,24 +333,45 @@ while(<>) {
}
# Generate the header file
+print STDERR "Generating unidata.h...\n";
open(STDOUT, ">unidata.h") or die "unidata.h: $!\n";
-out("/* Automatically generated file, see scripts/make-unidata */\n",
+out("/** \@file lib/unidata.h\n",
+ " * \@brief Unicode tables\n",
+ " *\n",
+ " * Automatically generated file, see scripts/make-unidata\n",
+ " *\n",
+ " * DO NOT EDIT.\n",
+ " */\n",
"#ifndef UNIDATA_H\n",
"#define UNIDATA_H\n");
# TODO choose stable values for General_Category
-out("enum unicode_gc_cat {\n",
+out("enum unicode_General_Category {\n",
join(",\n",
- map(" unicode_gc_$_", sort keys %cats)), "\n};\n");
+ map(" unicode_General_Category_$_", sort keys %cats)), "\n};\n");
+
+out("enum unicode_Grapheme_Break {\n",
+ join(",\n",
+ map(" unicode_Grapheme_Break_$_", sort keys %gbreak)),
+ "\n};\n");
+out("extern const char *const unicode_Grapheme_Break_names[];\n");
out("enum unicode_Word_Break {\n",
join(",\n",
- map(" unicode_Word_Break_$_", sort keys %wbpropvals)),
+ map(" unicode_Word_Break_$_", sort keys %wbreak)),
+ "\n};\n");
+out("extern const char *const unicode_Word_Break_names[];\n");
+
+out("enum unicode_Sentence_Break {\n",
+ join(",\n",
+ map(" unicode_Sentence_Break_$_", sort keys %sbreak)),
"\n};\n");
+out("extern const char *const unicode_Sentence_Break_names[];\n");
out("enum unicode_flags {\n",
- " unicode_normalize_before_casefold = 1\n",
+ " unicode_normalize_before_casefold = 1,\n",
+ " unicode_compatibility_decomposition = 2\n",
"};\n",
"\n");
@@ -268,91 +392,158 @@ sub choosetype {
}
out("struct unidata {\n",
- " const uint32_t *compat;\n",
- " const uint32_t *canon;\n",
+ # decomposition (canonical or compatibility;
+ # unicode_compatibility_decomposition distinguishes) or NULL
+ " const uint32_t *decomp;\n",
+
+ # case-folded string or NULL
" const uint32_t *casefold;\n",
- " ".choosetype($minud, $maxud)." upper_offset;\n",
- " ".choosetype($minld, $maxld)." lower_offset;\n",
+
+ # composed characters that start with this code point. This only
+ # includes primary composites, i.e. the decomposition mapping is
+ # canonical and this code point is not in the exclusion table.
+ " const uint32_t *composed;\n",
+
+# " ".choosetype($minud, $maxud)." upper_offset;\n",
+# " ".choosetype($minld, $maxld)." lower_offset;\n",
+
+ # canonical combining class
" ".choosetype(0, $maxccc)." ccc;\n",
- " char gc;\n",
+ " char general_category;\n",
+
+ # see unicode_flags enum
" uint8_t flags;\n",
+ " char grapheme_break;\n",
" char word_break;\n",
+ " char sentence_break;\n",
"};\n");
-# compat, canon and casefold do have non-BMP characters, so we
+# decomp and casefold do have non-BMP characters, so we
# can't use a simple 16-bit table. We could use UTF-8 or UTF-16
# though, saving a bit of space (probably not that much...) at the
# cost of marginally reduced performance and additional complexity
out("extern const struct unidata *const unidata[];\n");
+out("extern const struct unicode_utf8_row {\n",
+ " uint8_t count;\n",
+ " uint8_t min2, max2;\n",
+ "} unicode_utf8_valid[];\n");
+
out("#define UNICODE_NCHARS ", ($max + 1), "\n");
out("#define UNICODE_MODULUS $modulus\n");
+out("#define UNICODE_BREAK_START $break_start\n");
+out("#define UNICODE_BREAK_END $break_end\n");
+out("#define UNICODE_BREAK_TOP $break_top\n");
out("#endif\n");
close STDOUT or die "unidata.h: $!\n";
+print STDERR "Generating unidata.c...\n";
open(STDOUT, ">unidata.c") or die "unidata.c: $!\n";
-out("/* Automatically generated file, see scripts/make-unidata */\n",
- "#include \n",
- "#include \"types.h\"\n",
+out("/** \@file lib/unidata.c\n",
+ " * \@brief Unicode tables\n",
+ " *\n",
+ " * Automatically generated file, see scripts/make-unidata\n",
+ " *\n",
+ " * DO NOT EDIT.\n",
+ " */\n",
+ "#include \"common.h\"\n",
"#include \"unidata.h\"\n");
-# Short aliases for general category codes
-
-out(map(sprintf("#define %s unicode_gc_%s\n", $_, $_), sort keys %cats));
+# Short aliases to keep .c file small
+
+out(map(sprintf("#define %s unicode_General_Category_%s\n", $_, $_),
+ sort keys %cats));
+out(map(sprintf("#define GB%s unicode_Grapheme_Break_%s\n", $_, $_),
+ sort keys %gbreak));
+out(map(sprintf("#define WB%s unicode_Word_Break_%s\n", $_, $_),
+ sort keys %wbreak));
+out(map(sprintf("#define SB%s unicode_Sentence_Break_%s\n", $_, $_),
+ sort keys %sbreak));
+out("#define NBC unicode_normalize_before_casefold\n");
+out("#define CD unicode_compatibility_decomposition\n");
+
+# Names for *_Break properties
+out("const char *const unicode_Grapheme_Break_names[] = {\n",
+ join(",\n",
+ map(" \"$_\"", sort keys %gbreak)),
+ "\n};\n");
+out("const char *const unicode_Word_Break_names[] = {\n",
+ join(",\n",
+ map(" \"$_\"", sort keys %wbreak)),
+ "\n};\n");
+out("const char *const unicode_Sentence_Break_names[] = {\n",
+ join(",\n",
+ map(" \"$_\"", sort keys %sbreak)),
+ "\n};\n");
-# Generate the decomposition mapping tables. We look out for duplicates
-# in order to save space and report this as decompsaved at the end. In
-# Unicode 5.0.0 this saves 1795 entries, which is at least 14Kbytes.
-my $decompnum = 0;
-my %decompnums = ();
-my $decompsaved = 0;
+our $ddnum = 0;
+our $ddsaved = 0;
+our %ddnums = ();
+my $ddfirst = 1;
out("static const uint32_t ");
-for(my $c = 0; $c <= $max; ++$c) {
- # If canon is set then compat will be too and will be identical.
- # If compat is set the canon might be clear. So we use the
- # compat version and fix up the symbols after.
- if(exists $data{$c}->{compat}) {
- my $s = join(",",
- (map(hex($_), split(/\s+/, $data{$c}->{compat})), 0));
- if(!exists $decompnums{$s}) {
- out(",\n") if $decompnum != 0;
- out("cd$decompnum\[]={$s}");
- $decompnums{$s} = $decompnum++;
+sub dedupe {
+ my $s = join(",", @_);
+ if(!exists $ddnums{$s}) {
+ if($ddfirst) {
+ $ddfirst = 0;
} else {
- ++$decompsaved;
- }
- $data{$c}->{compatsym} = "cd$decompnums{$s}";
- if(exists $data{$c}->{canon}) {
- $data{$c}->{canonsym} = "cd$decompnums{$s}";
+ out(",\n");
}
+ out("dd$ddnum\[]={$s}");
+ $ddnums{$s} = $ddnum++;
+ } else {
+ ++$ddsaved;
}
+ return "dd$ddnums{$s}";
}
-out(";\n");
-# ...and the case folding table. Again we compress equal entries to save
-# space. In Unicode 5.0.0 this saves 51 entries or at least 408 bytes.
-# This doesn't seem as worthwhile as the decomposition mapping saving above.
-my $cfnum = 0;
-my %cfnums = ();
-my $cfsaved = 0;
-out("static const uint32_t ");
+# Generate the decomposition mapping tables.
+print STDERR "> decomposition mappings\n";
for(my $c = 0; $c <= $max; ++$c) {
- if(exists $data{$c}->{casefold}) {
- my $s = join(",",
- (map(hex($_), split(/\s+/, $data{$c}->{casefold})), 0));
- if(!exists $cfnums{$s}) {
- out(",\n") if $cfnum != 0;
- out("cf$cfnum\[]={$s}");
- $cfnums{$s} = $cfnum++;
+ if(exists $data{$c} && exists $data{$c}->{decomp}) {
+ $data{$c}->{decompsym} = dedupe(@{$data{$c}->{decomp}}, 0);
+ }
+}
+
+print STDERR "> composition mappings\n";
+# First we must generate the mapping of each code point to possible
+# compositions.
+for(my $c = 0; $c <= $max; ++$c) {
+ if(exists $data{$c}
+ && exists $data{$c}->{decomp}
+ && !exists $data{$c}->{compat}
+ && !$data{$c}->{Full_Composition_Exclusion}) {
+ # $c has a non-excluded canonical decomposition, i.e. it is
+ # a primary composite. Find the first code point of the decomposition
+ my $first = ${$data{$c}->{decomp}}[0];
+ if(!exists $data{$first}->{compose}) {
+ $data{$first}->{compose} = [$c];
} else {
- ++$cfsaved;
+ push(@{$data{$first}->{compose}}, $c);
}
- $data{$c}->{cfsym} = "cf$cfnums{$s}";
}
}
+# Then we can generate the tables.
+for(my $c = 0; $c <= $max; ++$c) {
+ if(exists $data{$c} && exists $data{$c}->{compose}) {
+ $data{$c}->{compsym} = dedupe(@{$data{$c}->{compose}}, 0);
+ }
+}
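
A hedged sketch of how these composed lists might be consulted during canonical composition; the helper name is invented, it reuses lookup_sketch() from above, and it leans on the script's closing assertion that canonical decompositions are at most 2 long:

    /* Return the primary composite of starter s followed by c, or 0. */
    static uint32_t compose_pair_sketch(uint32_t s, uint32_t c) {
      const uint32_t *p = lookup_sketch(s)->composed;
      if(p)
        for(; *p; ++p) {
          const uint32_t *d = lookup_sketch(*p)->decomp;
          if(d && d[1] == c)              /* d[0] == s by construction */
            return *p;
        }
      return 0;
    }
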
+
+# The case folding table.
+print STDERR "> case-fold mappings\n";
+for(my $c = 0; $c <= $max; ++$c) {
+ if(exists $data{$c} && exists $data{$c}->{casefold}) {
+ $data{$c}->{cfsym} = dedupe(map(hex($_), split(/\s+/,
+ $data{$c}->{casefold})),
+ 0);
+ }
+}
+
+# End of de-dupable arrays
out(";\n");
# Visit all the $modulus-character blocks in turn and generate the
@@ -360,38 +551,48 @@ out(";\n");
# Unicode 5.0.0 with $modulus=128 and current table data this saves
# 1372 subtables or at least three and a half megabytes on 32-bit
# platforms.
-
+print STDERR "> subtables\n";
my %subtable = (); # base->subtable number
my %subtableno = (); # subtable number -> content
my $subtablecounter = 0; # counter for subtable numbers
my $subtablessaved = 0; # number of tables saved
for(my $base = 0; $base <= $max; $base += $modulus) {
+ next if $base >= $break_start && $base < $break_end;
+ next if $base >= $break_top;
my @t;
for(my $c = $base; $c < $base + $modulus; ++$c) {
my $d = $data{$c};
- my $canonsym = ($data{$c}->{canonsym} or "0");
- my $compatsym = ($data{$c}->{compatsym} or "0");
- my $cfsym = ($data{$c}->{cfsym} or "0");
+ my $decompsym = ($d->{decompsym} or "0");
+ my $cfsym = ($d->{cfsym} or "0");
+ my $compsym = ($d->{compsym} or "0");
+ my $ccc = ($d->{ccc} or "0");
+ my $gc = ($d->{gc} or "Cn");
my @flags = ();
if($data{$c}->{ypogegrammeni}) {
- push(@flags, "unicode_normalize_before_casefold");
+ push(@flags, "NBC");
+ }
+ if($data{$c}->{compat}) {
+ push(@flags, "CD");
}
my $flags = @flags ? join("|", @flags) : 0;
- push(@t, "{".
+ push(@t, "{".
join(",",
- $compatsym,
- $canonsym,
+ $decompsym,
$cfsym,
- $d->{ud},
- $d->{ld},
- $d->{ccc},
- $d->{gc},
+ $compsym,
+# $d->{ud},
+# $d->{ld},
+ $ccc,
+ $gc,
$flags,
- "unicode_Word_Break_$d->{wbreak}",
+ "GB$d->{gbreak}",
+ "WB$d->{wbreak}",
+ "SB$d->{sbreak}",
)."}");
}
my $t = join(",\n", @t);
if(!exists $subtable{$t}) {
+ out(sprintf("/* %04X-%04X */\n", $base, $base + $modulus - 1));
out("static const struct unidata st$subtablecounter\[] = {\n",
"$t\n",
"};\n");
@@ -402,13 +603,66 @@ for(my $base = 0; $base <= $max; $base += $modulus) {
$subtableno{$base} = $subtable{$t};
}
-out("const struct unidata*const unidata[]={\n");
+print STDERR "> main table\n";
+out("const struct unidata *const unidata[]={\n");
for(my $base = 0; $base <= $max; $base += $modulus) {
+ next if $base >= $break_start && $base < $break_end;
+ next if $base >= $break_top;
+ #out("st$subtableno{$base} /* ".sprintf("%04x", $base)." */,\n");
out("st$subtableno{$base},\n");
}
out("};\n");
+print STDERR "> UTF-8 table\n";
+out("const struct unicode_utf8_row unicode_utf8_valid[] = {\n");
+for(my $c = 0; $c <= 0x7F; ++$c) {
+ out(" { 1, 0, 0 }, /* $c */\n");
+}
+for(my $c = 0x80; $c < 0xC2; ++$c) {
+ out(" { 0, 0, 0 }, /* $c */\n");
+}
+for(my $c = 0xC2; $c <= 0xDF; ++$c) {
+ out(" { 2, 0x80, 0xBF }, /* $c */\n");
+}
+for(my $c = 0xE0; $c <= 0xE0; ++$c) {
+ out(" { 3, 0xA0, 0xBF }, /* $c */\n");
+}
+for(my $c = 0xE1; $c <= 0xEC; ++$c) {
+ out(" { 3, 0x80, 0xBF }, /* $c */\n");
+}
+for(my $c = 0xED; $c <= 0xED; ++$c) {
+ out(" { 3, 0x80, 0x9F }, /* $c */\n");
+}
+for(my $c = 0xEE; $c <= 0xEF; ++$c) {
+ out(" { 3, 0x80, 0xBF }, /* $c */\n");
+}
+for(my $c = 0xF0; $c <= 0xF0; ++$c) {
+ out(" { 4, 0x90, 0xBF }, /* $c */\n");
+}
+for(my $c = 0xF1; $c <= 0xF3; ++$c) {
+ out(" { 4, 0x80, 0xBF }, /* $c */\n");
+}
+for(my $c = 0xF4; $c <= 0xF4; ++$c) {
+ out(" { 4, 0x80, 0x8F }, /* $c */\n");
+}
+for(my $c = 0xF5; $c <= 0xFF; ++$c) {
+ out(" { 0, 0, 0 }, /* $c */\n");
+}
+out("};\n");
+
close STDOUT or die "unidata.c: $!\n";
-print STDERR "max=$max, subtables=$subtablecounter, subtablessaved=$subtablessaved\n";
-print STDERR "decompsaved=$decompsaved cfsaved=$cfsaved\n";
+print STDERR "Done.\n\n";
+printf STDERR "modulus=%d\n", $modulus;
+printf STDERR "max=%04X\n", $max;
+print STDERR "subtables=$subtablecounter, subtablessaved=$subtablessaved\n";
+print STDERR "ddsaved=$ddsaved\n";
+print STDERR "maxcompat=$maxcompat maxcanon=$maxcanon\n";
+print STDERR "$hangul_syllable_decomps canonical decompositions to Hangul syllables\n";
+print STDERR "$hangul_choseong_decomps canonical decompositions to Hangul Choseong\n";
+
+die "We assumed that canonical decompositions were never more than 2 long!\n"
+ if $maxcanon > 2;
+
+die "We assumed no canonical decompositions to Hangul syllables/Choseong!\n"
+ if $hangul_syllable_decomps || $hangul_choseong_decomps;