// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Implements a custom word iterator used for our spellchecker.

#include "chrome/renderer/spellchecker/spellcheck_worditerator.h"

#include <map>
#include <string>

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "chrome/renderer/spellchecker/spellcheck.h"
#include "third_party/icu/source/common/unicode/normlzr.h"
#include "third_party/icu/source/common/unicode/schriter.h"
#include "third_party/icu/source/common/unicode/uscript.h"
#include "third_party/icu/source/i18n/unicode/ulocdata.h"

// SpellcheckCharAttribute implementation:

SpellcheckCharAttribute::SpellcheckCharAttribute()
    : script_code_(USCRIPT_LATIN) {
}

SpellcheckCharAttribute::~SpellcheckCharAttribute() {
}

void SpellcheckCharAttribute::SetDefaultLanguage(const std::string& language) {
  CreateRuleSets(language);
}

base::string16 SpellcheckCharAttribute::GetRuleSet(
    bool allow_contraction) const {
  return allow_contraction ?
      ruleset_allow_contraction_ : ruleset_disallow_contraction_;
}

void SpellcheckCharAttribute::CreateRuleSets(const std::string& language) {
  // The template for our custom rule sets, which is based on the word-break
  // rules of ICU 4.0:
  // <http://source.icu-project.org/repos/icu/icu/tags/release-4-0/source/data/brkitr/word.txt>.
  // The major differences from the original rules are listed below:
  // * It discards comments in the original rules.
  // * It discards characters not needed by our spellchecker (e.g. numbers,
  //   punctuation characters, Hiragana, Katakana, CJK ideographs, and so on).
  // * It allows customization of the $ALetter value (i.e. word characters).
  // * It allows customization of the $ALetterPlus value (i.e. whether or not
  //   to use the dictionary data).
  // * It allows choosing whether or not to split text at contraction
  //   characters.
  // This template changes only the forward-iteration rules, so calling
  // ubrk_prev() returns the same results as the original rules.
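  // The "{200}" tags in the forward rules assign the non-zero rule status
  // UBRK_WORD_LETTER (200) to boundaries that end a word we care about.
  // GetNextWord() relies on this: ubrk_getRuleStatus() returns UBRK_WORD_NONE
  // (0) for boundaries produced by the other rules, and those tokens are
  // skipped.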
  static const char kRuleTemplate[] =
      "!!chain;"
      "$CR = [\\p{Word_Break = CR}];"
      "$LF = [\\p{Word_Break = LF}];"
      "$Newline = [\\p{Word_Break = Newline}];"
      "$Extend = [\\p{Word_Break = Extend}];"
      "$Format = [\\p{Word_Break = Format}];"
      "$Katakana = [\\p{Word_Break = Katakana}];"
      // Not all the characters in a given script are ALetter.
      // For instance, U+05F4 is MidLetter. So, this may be
      // better, but it leads to an empty set error in Thai.
      // "$ALetter = [[\\p{script=%s}] & [\\p{Word_Break = ALetter}]];"
      "$ALetter = [\\p{script=%s}%s];"
      "$MidNumLet = [\\p{Word_Break = MidNumLet}];"
      "$MidLetter = [\\p{Word_Break = MidLetter}%s];"
      "$MidNum = [\\p{Word_Break = MidNum}];"
      "$Numeric = [\\p{Word_Break = Numeric}];"
      "$ExtendNumLet = [\\p{Word_Break = ExtendNumLet}];"

      "$Control = [\\p{Grapheme_Cluster_Break = Control}]; "
      "%s"  // ALetterPlus

      "$KatakanaEx = $Katakana ($Extend | $Format)*;"
      "$ALetterEx = $ALetterPlus ($Extend | $Format)*;"
      "$MidNumLetEx = $MidNumLet ($Extend | $Format)*;"
      "$MidLetterEx = $MidLetter ($Extend | $Format)*;"
      "$MidNumEx = $MidNum ($Extend | $Format)*;"
      "$NumericEx = $Numeric ($Extend | $Format)*;"
      "$ExtendNumLetEx = $ExtendNumLet ($Extend | $Format)*;"

      "$Hiragana = [\\p{script=Hiragana}];"
      "$Ideographic = [\\p{Ideographic}];"
      "$HiraganaEx = $Hiragana ($Extend | $Format)*;"
      "$IdeographicEx = $Ideographic ($Extend | $Format)*;"

      "!!forward;"
      "$CR $LF;"
      "[^$CR $LF $Newline]? ($Extend | $Format)+;"
      "$ALetterEx {200};"
      "$ALetterEx $ALetterEx {200};"
      "%s"  // (Allow|Disallow) Contraction

      "!!reverse;"
      "$BackALetterEx = ($Format | $Extend)* $ALetterPlus;"
      "$BackMidNumLetEx = ($Format | $Extend)* $MidNumLet;"
      "$BackNumericEx = ($Format | $Extend)* $Numeric;"
      "$BackMidNumEx = ($Format | $Extend)* $MidNum;"
      "$BackMidLetterEx = ($Format | $Extend)* $MidLetter;"
      "$BackKatakanaEx = ($Format | $Extend)* $Katakana;"
      "$BackExtendNumLetEx = ($Format | $Extend)* $ExtendNumLet;"
      "$LF $CR;"
      "($Format | $Extend)* [^$CR $LF $Newline]?;"
      "$BackALetterEx $BackALetterEx;"
      "$BackALetterEx ($BackMidLetterEx | $BackMidNumLetEx) $BackALetterEx;"
      "$BackNumericEx $BackNumericEx;"
      "$BackNumericEx $BackALetterEx;"
      "$BackALetterEx $BackNumericEx;"
      "$BackNumericEx ($BackMidNumEx | $BackMidNumLetEx) $BackNumericEx;"
      "$BackKatakanaEx $BackKatakanaEx;"
      "$BackExtendNumLetEx ($BackALetterEx | $BackNumericEx |"
      " $BackKatakanaEx | $BackExtendNumLetEx);"
      "($BackALetterEx | $BackNumericEx | $BackKatakanaEx)"
      " $BackExtendNumLetEx;"

      "!!safe_reverse;"
      "($Extend | $Format)+ .?;"
      "($MidLetter | $MidNumLet) $BackALetterEx;"
      "($MidNum | $MidNumLet) $BackNumericEx;"

      "!!safe_forward;"
      "($Extend | $Format)+ .?;"
      "($MidLetterEx | $MidNumLetEx) $ALetterEx;"
      "($MidNumEx | $MidNumLetEx) $NumericEx;";

  // Retrieve the script codes used by the given language from ICU. When the
  // given language consists of two or more scripts, we just use the first
  // script. The number of script codes returned is always less than 8, so an
  // array of size 8 can hold all of them without a buffer-overflow error.
  UErrorCode error = U_ZERO_ERROR;
  UScriptCode script_code[8];
  int scripts = uscript_getCode(language.c_str(), script_code,
                                arraysize(script_code), &error);
  if (U_SUCCESS(error) && scripts >= 1)
    script_code_ = script_code[0];

  // Retrieve the values for $ALetter and $ALetterPlus. We use the dictionary
  // only for the languages which need it (i.e. Korean and Thai) to prevent ICU
  // from returning dictionary words (i.e. Korean or Thai words) for languages
  // which don't need them.
  const char* aletter = uscript_getName(script_code_);
  if (!aletter)
    aletter = "Latin";

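  // [:LineBreak = Complex_Context:] matches characters whose Line_Break
  // property is SA (Complex_Context), i.e. South-East Asian scripts such as
  // Thai that need dictionary-based word breaking in ICU.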
  const char kWithDictionary[] =
      "$dictionary = [:LineBreak = Complex_Context:];"
      "$ALetterPlus = [$ALetter [$dictionary-$Extend-$Control]];";
  const char kWithoutDictionary[] = "$ALetterPlus = $ALetter;";
  const char* aletter_plus = kWithoutDictionary;
  if (script_code_ == USCRIPT_HANGUL || script_code_ == USCRIPT_THAI)
    aletter_plus = kWithDictionary;

  // Treat numbers as word characters except for Arabic and Hebrew.
  const char* aletter_extra = " [0123456789]";
  if (script_code_ == USCRIPT_HEBREW || script_code_ == USCRIPT_ARABIC)
    aletter_extra = "";

  const char kMidLetterExtra[] = "";
  // For Hebrew, treat single/double quotation marks as MidLetter.
  const char kMidLetterExtraHebrew[] = "\"'";
  const char* midletter_extra = kMidLetterExtra;
  if (script_code_ == USCRIPT_HEBREW)
    midletter_extra = kMidLetterExtraHebrew;

  // Create two custom rule sets: one that allows contractions and one that
  // does not. We save these strings in UTF-16 so we can use them without
  // conversion. (ICU needs UTF-16 strings.)
  const char kAllowContraction[] =
      "$ALetterEx ($MidLetterEx | $MidNumLetEx) $ALetterEx {200};";
  const char kDisallowContraction[] = "";

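  // The five "%s" placeholders in kRuleTemplate are filled in this order: the
  // $ALetter script name, the extra $ALetter characters (digits), the extra
  // $MidLetter characters, the $ALetterPlus definition, and the contraction
  // rule.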
  ruleset_allow_contraction_ = ASCIIToUTF16(
      base::StringPrintf(kRuleTemplate,
                         aletter,
                         aletter_extra,
                         midletter_extra,
                         aletter_plus,
                         kAllowContraction));
  ruleset_disallow_contraction_ = ASCIIToUTF16(
      base::StringPrintf(kRuleTemplate,
                         aletter,
                         aletter_extra,
                         midletter_extra,
                         aletter_plus,
                         kDisallowContraction));
}

bool SpellcheckCharAttribute::OutputChar(UChar c,
                                         base::string16* output) const {
  // Call the language-specific function if necessary.
  // Otherwise, we call the default one.
  switch (script_code_) {
    case USCRIPT_ARABIC:
      return OutputArabic(c, output);

    case USCRIPT_HANGUL:
      return OutputHangul(c, output);

    case USCRIPT_HEBREW:
      return OutputHebrew(c, output);

    default:
      return OutputDefault(c, output);
  }
}

bool SpellcheckCharAttribute::OutputArabic(UChar c,
                                           base::string16* output) const {
  // Discard characters that are not in the Arabic alphabet. We also discard
  // Arabic vowel marks (Damma, Fatha, Kasra, etc.) to prevent our Arabic
  // dictionary from marking an Arabic word that includes vowel marks as
  // misspelled. (We need to check these vowel marks manually and filter them
  // out since their script code is USCRIPT_ARABIC.)
  if (0x0621 <= c && c <= 0x064D)
    output->push_back(c);
  return true;
}

bool SpellcheckCharAttribute::OutputHangul(UChar c,
                                           base::string16* output) const {
  // Decompose a Hangul character into the Hangul vowel and consonants used by
  // our spellchecker. A precomposed Hangul character in Unicode is a ligature
  // consisting of a Hangul vowel and consonants, e.g. U+AC01 "Gag" consists of
  // U+1100 "G", U+1161 "a", and U+11A8 "g". That is, we can treat each Hangul
  // character as a point in a three-dimensional space whose axes are (first
  // consonant, vowel, last consonant). Therefore, we can compose a Hangul
  // character from a vowel and two consonants with linear composition:
  //   character = 0xAC00 +
  //               (first consonant - 0x1100) * 28 * 21 +
  //               (vowel - 0x1161) * 28 +
  //               (last consonant - 0x11A7);
  // We can also decompose a Hangul character with linear decomposition:
  //   first consonant = (character - 0xAC00) / 28 / 21;
  //   vowel           = (character - 0xAC00) / 28 % 21;
  //   last consonant  = (character - 0xAC00) % 28;
  // This code is adapted from Unicode Standard Annex #15
  // <http://unicode.org/reports/tr15>, with some comments added.
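  // For example, U+AC01 "Gag" gives index = 0xAC01 - 0xAC00 = 1, so the first
  // consonant is 0x1100 + 1 / 588 = U+1100, the vowel is
  // 0x1161 + (1 % 588) / 28 = U+1161, and the last consonant is
  // 0x11A7 + 1 % 28 = U+11A8, matching the composition example above.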
  const int kSBase = 0xAC00;  // U+AC00: the base of Hangul syllables.
  const int kLBase = 0x1100;  // U+1100: the base of Hangul first consonants.
  const int kVBase = 0x1161;  // U+1161: the base of Hangul vowels.
  const int kTBase = 0x11A7;  // U+11A7: the base of Hangul last consonants.
  const int kLCount = 19;     // The number of Hangul first consonants.
  const int kVCount = 21;     // The number of Hangul vowels.
  const int kTCount = 28;     // The number of Hangul last consonants.
  const int kNCount = kVCount * kTCount;
  const int kSCount = kLCount * kNCount;

  int index = c - kSBase;
  if (index < 0 || index >= kSCount) {
    // This is not a Hangul syllable, so let the default handler decide
    // whether to output this character.
    return OutputDefault(c, output);
  }

  // This is a Hangul syllable. Decompose it into its Hangul vowel and
  // consonants.
  int l = kLBase + index / kNCount;
  int v = kVBase + (index % kNCount) / kTCount;
  int t = kTBase + index % kTCount;
  output->push_back(l);
  output->push_back(v);
  if (t != kTBase)
    output->push_back(t);
  return true;
}

bool SpellcheckCharAttribute::OutputHebrew(UChar c,
                                           base::string16* output) const {
  // Discard characters that are not Hebrew letters. We also discard Hebrew
  // niqquds to prevent our Hebrew dictionary from marking a Hebrew word that
  // includes niqquds as misspelled. (As with Arabic vowel marks, we need to
  // check niqquds manually and filter them out since their script code is
  // USCRIPT_HEBREW.)
  // Pass through ASCII single/double quotation marks and Hebrew Geresh and
  // Gershayim.
  if ((0x05D0 <= c && c <= 0x05EA) || c == 0x22 || c == 0x27 ||
      c == 0x05F4 || c == 0x05F3)
    output->push_back(c);
  return true;
}

bool SpellcheckCharAttribute::OutputDefault(UChar c,
                                            base::string16* output) const {
  // Check the script code of this character and output it only if it matches
  // the one used by the spellchecker language.
  UErrorCode status = U_ZERO_ERROR;
  UScriptCode script_code = uscript_getScript(c, &status);
  if (script_code == script_code_ || script_code == USCRIPT_COMMON)
    output->push_back(c);
  return true;
}

// SpellcheckWordIterator implementation:
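//
// Expected call sequence: Initialize() once with the SpellcheckCharAttribute
// for the current language, then SetText() for each piece of text to check,
// then GetNextWord() repeatedly until it returns false.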

SpellcheckWordIterator::SpellcheckWordIterator()
    : text_(NULL),
      length_(0),
      position_(UBRK_DONE),
      attribute_(NULL),
      iterator_(NULL) {
}

SpellcheckWordIterator::~SpellcheckWordIterator() {
  Reset();
}

bool SpellcheckWordIterator::Initialize(
    const SpellcheckCharAttribute* attribute,
    bool allow_contraction) {
  // Create a custom ICU break iterator for this object, with no text attached
  // yet. (The text is set later so this iterator can be reused.)
  DCHECK(attribute);
  UErrorCode open_status = U_ZERO_ERROR;
  UParseError parse_status;
  base::string16 rule(attribute->GetRuleSet(allow_contraction));

  // If there is no rule set, the attributes were invalid.
  if (rule.empty())
    return false;

  iterator_ = ubrk_openRules(rule.c_str(), rule.length(), NULL, 0,
                             &parse_status, &open_status);
  if (U_FAILURE(open_status))
    return false;

  // Set the character attributes so we can normalize the words extracted by
  // this iterator.
  attribute_ = attribute;
  return true;
}

bool SpellcheckWordIterator::IsInitialized() const {
  // Return true if we have an ICU custom iterator.
  return !!iterator_;
}

bool SpellcheckWordIterator::SetText(const char16* text, size_t length) {
  DCHECK(!!iterator_);

  // Set the text to be split by this iterator.
  UErrorCode status = U_ZERO_ERROR;
  ubrk_setText(iterator_, text, length, &status);
  if (U_FAILURE(status))
    return false;

  // Retrieve the position of the first word in this text. We return false if
  // this text does not have any words. (For example, the input text consists
  // only of Chinese characters while the spellchecker language is English.)
  position_ = ubrk_first(iterator_);
  if (position_ == UBRK_DONE)
    return false;

  text_ = text;
  length_ = static_cast<int>(length);
  return true;
}

bool SpellcheckWordIterator::GetNextWord(base::string16* word_string,
                                         int* word_start,
                                         int* word_length) {
  DCHECK(!!text_ && length_ > 0);

  word_string->clear();
  *word_start = 0;
  *word_length = 0;

  if (!text_ || position_ == UBRK_DONE)
    return false;

  // Find a word that can be checked for spelling. Our rule sets filter out
  // invalid words (e.g. numbers and characters not supported by the
  // spellchecker language), so ubrk_getRuleStatus() returns UBRK_WORD_NONE
  // when this iterator finds an invalid word. We skip such words until we
  // find a valid word or reach the end of the input string.
  int next = ubrk_next(iterator_);
  while (next != UBRK_DONE) {
    if (ubrk_getRuleStatus(iterator_) != UBRK_WORD_NONE) {
      if (Normalize(position_, next - position_, word_string)) {
        *word_start = position_;
        *word_length = next - position_;
        position_ = next;
        return true;
      }
    }
    position_ = next;
    next = ubrk_next(iterator_);
  }

  // There aren't any more words in the given text. Set the position to
  // UBRK_DONE to prevent ubrk_next() from being called the next time this
  // function is called.
  position_ = UBRK_DONE;
  return false;
}

void SpellcheckWordIterator::Reset() {
  if (iterator_) {
    ubrk_close(iterator_);
    iterator_ = NULL;
  }
}

bool SpellcheckWordIterator::Normalize(int input_start,
                                       int input_length,
                                       base::string16* output_string) const {
  // We use NFKC (Normalization Form KC: Compatibility Decomposition, followed
  // by Canonical Composition), defined in Unicode Standard Annex #15, to
  // normalize this token because it is the most suitable normalization
  // algorithm for our spellchecker. Nevertheless, it is not a perfect
  // algorithm for our spellchecker and we need manual normalization as well.
  // The normalized text does not have to be NUL-terminated since its
  // characters are copied to a string16, which adds a NUL character when
  // needed.
  icu::UnicodeString input(FALSE, &text_[input_start], input_length);
  UErrorCode status = U_ZERO_ERROR;
  icu::UnicodeString output;
  icu::Normalizer::normalize(input, UNORM_NFKC, 0, output, status);
  if (status != U_ZERO_ERROR && status != U_STRING_NOT_TERMINATED_WARNING)
    return false;

  // Copy the normalized text to the output.
  icu::StringCharacterIterator it(output);
  for (UChar c = it.first(); c != icu::CharacterIterator::DONE; c = it.next())
    attribute_->OutputChar(c, output_string);

  return !output_string->empty();
}