changeset: 95100:817f1f47824c parent: 95098:e639750ecd92 parent: 95099:104c55bc2276 user: Serhiy Storchaka date: Fri Mar 20 16:48:02 2015 +0200 files: Lib/smtpd.py Lib/test/test_buffer.py Lib/test/test_tokenize.py description: Issue #23681: Fixed Python 2 to 3 porting bugs. Indexing bytes returns an integer, not bytes. diff -r e639750ecd92 -r 817f1f47824c Lib/poplib.py --- a/Lib/poplib.py Fri Mar 20 16:12:43 2015 +0200 +++ b/Lib/poplib.py Fri Mar 20 16:48:02 2015 +0200 @@ -136,7 +136,7 @@ # so only possibilities are ...LF, ...CRLF, CR...LF if line[-2:] == CRLF: return line[:-2], octets - if line[0] == CR: + if line[:1] == CR: return line[1:-1], octets return line[:-1], octets diff -r e639750ecd92 -r 817f1f47824c Lib/quopri.py --- a/Lib/quopri.py Fri Mar 20 16:12:43 2015 +0200 +++ b/Lib/quopri.py Fri Mar 20 16:48:02 2015 +0200 @@ -145,7 +145,7 @@ new = new + c; i = i+1 elif i+1 == n and not partial: partial = 1; break - elif i+1 < n and line[i+1] == ESCAPE: + elif i+1 < n and line[i+1:i+2] == ESCAPE: new = new + ESCAPE; i = i+2 elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]): new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3 diff -r e639750ecd92 -r 817f1f47824c Lib/smtpd.py --- a/Lib/smtpd.py Fri Mar 20 16:12:43 2015 +0200 +++ b/Lib/smtpd.py Fri Mar 20 16:48:02 2015 +0200 @@ -154,7 +154,7 @@ else: self._emptystring = b'' self._linesep = b'\r\n' - self._dotsep = b'.' 
+ self._dotsep = ord(b'.') self._newline = b'\n' self._set_rset_state() self.seen_greeting = '' diff -r e639750ecd92 -r 817f1f47824c Lib/sunau.py --- a/Lib/sunau.py Fri Mar 20 16:12:43 2015 +0200 +++ b/Lib/sunau.py Fri Mar 20 16:48:02 2015 +0200 @@ -210,12 +210,9 @@ self._framesize = self._framesize * self._nchannels if self._hdr_size > 24: self._info = file.read(self._hdr_size - 24) - for i in range(len(self._info)): - if self._info[i] == b'\0': - self._info = self._info[:i] - break + self._info, _, _ = self._info.partition(b'\0') else: - self._info = '' + self._info = b'' try: self._data_pos = file.tell() except (AttributeError, OSError): diff -r e639750ecd92 -r 817f1f47824c Lib/test/test_buffer.py --- a/Lib/test/test_buffer.py Fri Mar 20 16:12:43 2015 +0200 +++ b/Lib/test/test_buffer.py Fri Mar 20 16:48:02 2015 +0200 @@ -150,15 +150,15 @@ format character.""" x = randrange(*fmtdict[mode][char]) if char == 'c': - x = bytes(chr(x), 'latin1') + x = bytes([x]) + if obj == 'numpy' and x == b'\x00': + # http://projects.scipy.org/numpy/ticket/1925 + x = b'\x01' if char == '?': x = bool(x) if char == 'f' or char == 'd': x = struct.pack(char, x) x = struct.unpack(char, x)[0] - if obj == 'numpy' and x == b'\x00': - # http://projects.scipy.org/numpy/ticket/1925 - x = b'\x01' return x def gen_item(fmt, obj): diff -r e639750ecd92 -r 817f1f47824c Lib/test/test_tokenize.py --- a/Lib/test/test_tokenize.py Fri Mar 20 16:12:43 2015 +0200 +++ b/Lib/test/test_tokenize.py Fri Mar 20 16:48:02 2015 +0200 @@ -1068,7 +1068,7 @@ encoding = object() encoding_used = None def mock_detect_encoding(readline): - return encoding, ['first', 'second'] + return encoding, [b'first', b'second'] def mock__tokenize(readline, encoding): nonlocal encoding_used @@ -1087,7 +1087,7 @@ counter += 1 if counter == 5: return b'' - return counter + return str(counter).encode() orig_detect_encoding = tokenize_module.detect_encoding orig__tokenize = tokenize_module._tokenize @@ -1095,7 +1095,8 @@ 
tokenize_module._tokenize = mock__tokenize try: results = tokenize(mock_readline) - self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4]) + self.assertEqual(list(results), + [b'first', b'second', b'1', b'2', b'3', b'4']) finally: tokenize_module.detect_encoding = orig_detect_encoding tokenize_module._tokenize = orig__tokenize