Merge pull request #37 from leonsenft/fix-string-lexing

Fixes parsing of strings that contain unicode-range-like sequences
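
For context, a minimal reproduction through csslib's public API. This is only a sketch, assuming the usual `package:csslib/parser.dart` entry point and the `StyleSheet.topLevels` field; before this change, the tokenizer lexed the `u+0041` inside the quoted value as a unicode-range instead of string content:

    import 'package:csslib/parser.dart' as css;

    void main() {
      // A declaration whose string value merely looks like a unicode-range.
      // Prior to this fix the tokenizer saw the `u+` prefix inside the quotes
      // and emitted a unicode-range token, corrupting the string literal.
      const source = '.foo { content: "u+0041"; }';

      final stylesheet = css.parse(source);
      print('parsed ${stylesheet.topLevels.length} top-level node(s)');
    }

With the fix, `content: "u+0041"` round-trips as a plain string, as the updated declaration_test.dart below expects.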
diff --git a/lib/src/tokenizer.dart b/lib/src/tokenizer.dart
index a14e866..d7677f9 100644
--- a/lib/src/tokenizer.dart
+++ b/lib/src/tokenizer.dart
@@ -202,8 +202,13 @@
           } else {
             return _errorToken();
           }
-        } else if ((ch == UNICODE_U || ch == UNICODE_LOWER_U) &&
+        } else if (_inString &&
+            (ch == UNICODE_U || ch == UNICODE_LOWER_U) &&
             (_peekChar() == UNICODE_PLUS)) {
+          // `_inString` is misleading: we DON'T want to enter this block
+          // while tokenizing a string, and the parser sets `_inString` to
+          // false while it IS consuming the tokens within a string.
+          //
           // Unicode range: U+uNumber[-U+uNumber]
           //   uNumber = 0..10FFFF
           _nextChar(); // Skip +
diff --git a/test/declaration_test.dart b/test/declaration_test.dart
index c0c8f53..96600d8 100644
--- a/test/declaration_test.dart
+++ b/test/declaration_test.dart
@@ -16,6 +16,7 @@
 @ import url("test.css");
 .foo {
   background-color: #191919;
+  content: "u+0041";
   width: 10PX;
   height: 22mM !important;
   border-width: 20cm;
@@ -29,6 +30,7 @@
 @import "test.css";
 .foo {
   background-color: #191919;
+  content: "u+0041";
   width: 10px;
   height: 22mm !important;
   border-width: 20cm;
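
Note on the `_inString` comment in the tokenizer hunk above: the flag is inverted with respect to its name. The following is a rough, hypothetical sketch of that interplay; the names, default value, and structure are illustrative only, not csslib's actual parser code:

    // Hypothetical sketch of the flag semantics described in the tokenizer
    // comment: the parser clears `inString` while it consumes the tokens that
    // form a string literal, so the unicode-range branch, guarded by the
    // flag, never fires inside a string.
    class SketchTokenizer {
      // Assumed default; the real flag and its lifecycle live in csslib.
      bool inString = true;

      bool isUnicodeRangeStart(int ch, int next) =>
          inString && // false while the parser is consuming a string's tokens
          (ch == 0x55 /* 'U' */ || ch == 0x75 /* 'u' */) &&
          next == 0x2B /* '+' */;
    }

    class SketchParser {
      final tokenizer = SketchTokenizer();

      void consumeStringTokens() {
        tokenizer.inString = false; // "inside a string" means the flag is FALSE
        try {
          // ... read the tokens that make up the string literal ...
        } finally {
          tokenizer.inString = true; // restore normal tokenization
        }
      }
    }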