diff --git a/mopidy/mpd/tokenize.py b/mopidy/mpd/tokenize.py
index f10e349f..554df9e6 100644
--- a/mopidy/mpd/tokenize.py
+++ b/mopidy/mpd/tokenize.py
@@ -27,24 +27,41 @@ PARAM_RE = re.compile(r"""
     (.*)                            # Possibly a remainder to be parsed
     """ % {'unprintable': ''.join(map(chr, range(0x21)))}, re.VERBOSE)
 
+BAD_QUOTED_PARAM_RE = re.compile(r"""
+    ^
+    "[^"\\]*(?:\\.[^"\\]*)*         # start of a quoted value
+    (?:                             # followed by:
+        ("[^\s])                    # non-escaped quote, followed by non-whitespace
+        |                           # or
+        ([^"])                      # anything that is not a quote
+    )
+    """, re.VERBOSE)
+
 UNESCAPE_RE = re.compile(r'\\(.)')  # Backslash escapes any following char.
 
 
 def split(line):
     if not line.strip():
-        raise Error('No command given')
+        raise Error('No command given')  # 5@0
     match = WORD_RE.match(line)
     if not match:
-        raise Error('Invalid word character')
+        raise Error('Invalid word character')  # 5@0
     whitespace, command, remainder = match.groups()
     if whitespace:
-        raise Error('Letter expected')
-    result = [command]
+        raise Error('Letter expected')  # 5@0
 
+    result = [command]
     while remainder:
         match = PARAM_RE.match(remainder)
         if not match:
-            raise Error('Invalid unquoted character')
+            # Following checks are simply to match MPD error messages:
+            match = BAD_QUOTED_PARAM_RE.match(remainder)
+            if match:
+                if match.group(1):
+                    raise Error('Space expected after closing \'"\'')  # 2@0
+                else:
+                    raise Error('Missing closing \'"\'')  # 2@0
+            raise Error('Invalid unquoted character')  # 2@0
         unquoted, quoted, remainder = match.groups()
         result.append(unquoted or UNESCAPE_RE.sub(r'\g<1>', quoted))
     return result
diff --git a/tests/mpd/test_tokenizer.py b/tests/mpd/test_tokenizer.py
index 29e25cae..7c014c84 100644
--- a/tests/mpd/test_tokenizer.py
+++ b/tests/mpd/test_tokenizer.py
@@ -59,7 +59,6 @@ class TestTokenizer(unittest.TestCase):
         msg = 'Invalid unquoted character'
         self.assertTokenizeRaisesError('test par"m', msg)
         self.assertTokenizeRaisesError('test foo\bbar', msg)
-        self.assertTokenizeRaisesError('test "foo"bar', msg)
         self.assertTokenizeRaisesError('test foo"bar"baz', msg)
         self.assertTokenizeRaisesError('test foo\'bar', msg)
 
@@ -91,10 +90,11 @@ class TestTokenizer(unittest.TestCase):
         self.assertTokenizeEquals(['test', 'param'], 'test "param"\t\t')
 
     def test_quoted_param_invalid_chars(self):
-        # TODO: Figure out how to check for " without space behind it.
-        #msg = """Space expected after closing '"'"""
-        msg = 'Invalid unquoted character'
-        self.assertTokenizeRaisesError('test "par"m"', msg)
+        msg = 'Space expected after closing \'"\''
+        self.assertTokenizeRaisesError('test "foo"bar"', msg)
+        self.assertTokenizeRaisesError('test "foo"bar" ', msg)
+        self.assertTokenizeRaisesError('test "foo"bar', msg)
+        self.assertTokenizeRaisesError('test "foo"bar ', msg)
 
     def test_quoted_param_numbers(self):
         self.assertTokenizeEquals(['test', '123'], 'test "123"')
@@ -135,3 +135,7 @@ class TestTokenizer(unittest.TestCase):
     def test_unbalanced_quotes(self):
         msg = 'Invalid unquoted character'
         self.assertTokenizeRaisesError('test "foo bar" baz"', msg)
+
+    def test_missing_closing_quote(self):
+        self.assertTokenizeRaisesError('test "foo', 'Missing closing \'"\'')
+        self.assertTokenizeRaisesError('test "foo a ', 'Missing closing \'"\'')
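
For reviewers, here is a minimal standalone sketch (not part of the patch) of how the new BAD_QUOTED_PARAM_RE picks between the two MPD messages once PARAM_RE has already rejected a parameter: group 1 only matches when a closing quote is followed by a non-whitespace character, while group 2 catches values that never close their quote. The `classify_remainder` helper is hypothetical and only mirrors the new branch in `split()`; it takes the remainder string left after the command word.

```python
import re

# Copy of the BAD_QUOTED_PARAM_RE added by this patch.
BAD_QUOTED_PARAM_RE = re.compile(r"""
    ^
    "[^"\\]*(?:\\.[^"\\]*)*         # start of a quoted value
    (?:                             # followed by:
        ("[^\s])                    # non-escaped quote, followed by non-whitespace
        |                           # or
        ([^"])                      # anything that is not a quote
    )
    """, re.VERBOSE)


def classify_remainder(remainder):
    """Hypothetical helper: return the error message split() would raise
    for a remainder that PARAM_RE has already failed to match."""
    match = BAD_QUOTED_PARAM_RE.match(remainder)
    if match:
        if match.group(1):
            return 'Space expected after closing \'"\''
        return 'Missing closing \'"\''
    return 'Invalid unquoted character'


# Remainders as seen by split() after the 'test' command word:
assert classify_remainder('"foo"bar"') == 'Space expected after closing \'"\''
assert classify_remainder('"foo') == 'Missing closing \'"\''
assert classify_remainder('baz"') == 'Invalid unquoted character'
```

Anything that does not even start with a quote falls through to the old 'Invalid unquoted character' message, which is why test_unbalanced_quotes keeps its existing expectation.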