获取文件的最后n行,类似于tail

我正在为一个web应用程序编写一个日志文件查看器,为此我想通过日志文件的行进行分页。文件中的项目以底部的最新项目为基础。

所以我需要一个tail()方法,可以从底部读取n行,并支持偏移量。这是我想到的:

def tail(f, n, offset=0):
    """Reads a n lines from f with an offset of offset lines.

    f must be opened in binary mode (end-relative seeks are not allowed on
    Python 3 text files).  Returns the last n lines, skipping the final
    `offset` lines; lines come from splitlines(), i.e. without newlines.
    """
    avg_line_length = 74
    to_read = n + offset
    while 1:
        try:
            # int(): avg_line_length becomes a float after the 1.3 growth
            # below, and seek() rejects float offsets on Python 3
            f.seek(-int(avg_line_length * to_read), 2)
        except IOError:
            # woops.  apparently file is smaller than what we want
            # to step back, go to the beginning instead
            f.seek(0)
        pos = f.tell()
        lines = f.read().splitlines()
        if len(lines) >= to_read or pos == 0:
            # `offset and -offset or None` keeps everything when offset == 0
            return lines[-to_read:offset and -offset or None]
        # estimate was too small: grow it and retry
        avg_line_length *= 1.3

这是一个合理的方法吗?使用偏移量跟踪日志文件的推荐方法是什么?

183107 次浏览

假设Python 2上有一个类unix系统,你可以这样做:

import os


def tail(f, n, offset=0):
    """Return the last n lines of the file at path f via the Unix `tail`
    command, dropping the final `offset` lines.

    Fixes vs. the original: n + offset must be formatted into the command
    string (concatenating ints to a str raises TypeError), os.popen2 no
    longer exists on Python 3 (os.popen does), and `lines[:,-offset]` was a
    tuple index (TypeError) rather than a slice.
    """
    with os.popen('tail -n %d "%s"' % (n + offset, f)) as stdout:
        lines = stdout.readlines()
    # drop the trailing `offset` lines; `or None` keeps everything at offset 0
    return lines[:-offset or None]

对于python 3,你可以这样做:

import subprocess


def tail(f, n, offset=0):
    """Return the last n lines (as bytes) of the file at path f using the
    Unix `tail` command, dropping the final `offset` lines.

    Fixes vs. the original: every Popen argument must be a string
    (str(n + offset), not the int n + offset), and `lines[:, -offset]` was a
    tuple index (TypeError) rather than a slice.
    """
    proc = subprocess.Popen(['tail', '-n', str(n + offset), f],
                            stdout=subprocess.PIPE)
    lines = proc.stdout.readlines()
    proc.wait()  # reap the child process
    # drop the trailing `offset` lines; `or None` keeps everything at offset 0
    return lines[:-offset or None]

为了提高非常大的文件的效率(通常在logfile情况下,您可能想要使用tail),您通常希望避免读取整个文件(即使您没有将整个文件一次性读入内存)。然而,您确实需要以某种方式计算出行而不是字符的偏移量。一种可能是使用seek()逐字符向后读取,但这非常慢。相反,在更大的块中处理会更好。

我有一个实用函数,我写了一段时间向后读取文件,可以在这里使用。

import os, itertools


def rblocks(f, blocksize=4096):
    """Read file as series of blocks from end of file to start.

    The data itself is in normal order, only the order of the blocks is reversed.
    ie. "hello world" -> ["ld","wor", "lo ", "hel"]
    Note that the file must be opened in binary mode.
    """
    if 'b' not in f.mode.lower():
        raise Exception("File must be opened using binary mode.")
    size = os.stat(f.name).st_size
    fullblocks, lastblock = divmod(size, blocksize)

    # The first (end of file) block will be short, since this leaves
    # the rest aligned on a blocksize boundary.  This may be more
    # efficient than having the last (first in file) block be short
    f.seek(-lastblock, 2)
    yield f.read(lastblock)

    for i in range(fullblocks - 1, -1, -1):
        f.seek(i * blocksize)
        yield f.read(blocksize)


def tail(f, nlines):
    """Return the last `nlines` lines of binary file f (newlines stripped)."""
    buf = b''  # bytes, not str: rblocks yields bytes on Python 3
    result = []
    for block in rblocks(f):
        buf = block + buf
        lines = buf.splitlines()

        if lines:
            # lines[1:] are complete lines.  PREPEND them: blocks arrive
            # end-of-file first, so the original's result.extend() placed
            # earlier lines after later ones, scrambling the order.
            result = lines[1:] + result
            if len(result) >= nlines:
                return result[-nlines:]
            # keep the (possibly partial) first line for the next block
            buf = lines[0]

    return ([buf] + result)[-nlines:]




# usage: with-block closes the file (the original leaked the handle), and
# print() is required on Python 3 (the original's `print line` is Py2 syntax)
with open('file_to_tail.txt', 'rb') as f:
    for line in tail(f, 20):
        print(line)

【编辑】增加了更具体的版本(避免需要反转两次)

这个可能比你的快。不假设线的长度。每次一个块返回文件,直到找到正确数量的'\n'字符。

def tail( f, lines=20 ):
    """Return the last `lines` lines of open file `f`, joined by '\n'.

    Scans backwards in BLOCK_SIZE chunks, counting '\n' until enough lines
    have been seen, then joins the chunks and slices the wanted count.

    NOTE(review): Python 2 only as written -- it seeks relative to the end
    of the file and counts '\n' in str blocks.  Python 3 text files forbid
    nonzero end-relative seeks, and binary files would need b'\n' (see the
    bytes variant further down the page).
    """
    total_lines_wanted = lines

    BLOCK_SIZE = 1024
    f.seek(0, 2)                      # jump to EOF to learn the file size
    block_end_byte = f.tell()
    lines_to_go = total_lines_wanted
    block_number = -1
    blocks = [] # blocks of size BLOCK_SIZE, in reverse order starting
                # from the end of the file
    while lines_to_go > 0 and block_end_byte > 0:
        if (block_end_byte - BLOCK_SIZE > 0):
            # read the last block we haven't yet read
            f.seek(block_number*BLOCK_SIZE, 2)
            blocks.append(f.read(BLOCK_SIZE))
        else:
            # file too small, start from begining
            f.seek(0,0)
            # only read what was not read
            blocks.append(f.read(block_end_byte))
        lines_found = blocks[-1].count('\n')
        lines_to_go -= lines_found
        block_end_byte -= BLOCK_SIZE
        block_number -= 1
    # blocks were collected end-first, so reverse before joining
    all_read_text = ''.join(reversed(blocks))
    # slicing from the end discards a possibly-partial first line
    return '\n'.join(all_read_text.splitlines()[-total_lines_wanted:])

我不喜欢关于线长的复杂假设——作为一个实际问题——你永远不可能知道这样的事情。

通常,这将定位第一次或第二次通过循环时的最后20行。如果你的74个字符的东西实际上是准确的,你使块大小为2048,你几乎马上就会尾随20行。

此外,我也没有消耗大量的大脑热量,试图与物理操作系统块巧妙地对齐。使用这些高级I/O包,我怀疑您会看到尝试在OS块边界上对齐的任何性能后果。如果使用较低级别的I/O,则可能会看到加速。


更新

对于Python 3.2及以上版本,在文本文件(模式字符串中没有"b"打开的文件)中,只允许查找相对于文件开头的位置(例外是使用seek(0,2)查找到文件末尾):

例如:f = open('C:/.../../apache_logs.txt', 'rb')

 def tail(f, lines=20):
total_lines_wanted = lines


BLOCK_SIZE = 1024
f.seek(0, 2)
block_end_byte = f.tell()
lines_to_go = total_lines_wanted
block_number = -1
blocks = []
while lines_to_go > 0 and block_end_byte > 0:
if (block_end_byte - BLOCK_SIZE > 0):
f.seek(block_number*BLOCK_SIZE, 2)
blocks.append(f.read(BLOCK_SIZE))
else:
f.seek(0,0)
blocks.append(f.read(block_end_byte))
lines_found = blocks[-1].count(b'\n')
lines_to_go -= lines_found
block_end_byte -= BLOCK_SIZE
block_number -= 1
all_read_text = b''.join(reversed(blocks))
return b'\n'.join(all_read_text.splitlines()[-total_lines_wanted:])

转念一想,这可能和这里的任何东西一样快。

def tail(f, window=20):
    """Fill a ring buffer with the last `window` lines of iterable f, then
    print the two wrap-around halves (oldest-first, newest-second).

    Fix vs. the original: `print lines[...]` is Python 2 statement syntax
    and a SyntaxError on Python 3.
    """
    lines = [''] * window
    count = 0
    for l in f:
        lines[count % window] = l  # overwrite the oldest slot
        count += 1
    print(lines[count % window:], lines[:count % window])

这样就简单多了。它似乎确实在以一种不错的速度前进。

如果读取整个文件是可以接受的,那么使用deque。

from collections import deque
deque(f, maxlen=n)

在2.6之前,deques没有maxlen选项,但它很容易实现。

import itertools
def maxque(items, size):
    """Return a deque holding the final `size` items of `items`
    (a pre-2.6 substitute for deque's maxlen option)."""
    it = iter(items)
    # prime the queue with the first `size` items
    q = deque(itertools.islice(it, size))
    for elem in it:
        # evict the oldest, admit the newest
        q.popleft()
        q.append(elem)
    return q

如果需要从文件的末尾读取文件,那么使用快速搜索(又名指数搜索)。

def tail(f, n):
    """Return the last n lines of open binary file f.

    Galloping (exponential) search: double the probe distance from EOF
    until at least n lines are buffered.
    """
    assert n >= 0
    span, lines = n + 1, []
    while len(lines) <= n:
        try:
            f.seek(-span, 2)
        except IOError:
            # probe reached before the start: read everything and stop
            f.seek(0)
            lines = list(f)
            break
        lines = list(f)
        span *= 2
    return lines[-n:]

我最终使用的代码。我认为这是目前为止最好的:

def tail(f, n, offset=None):
    """Reads a n lines from f with an offset of offset lines.  The return
    value is a tuple in the form ``(lines, has_more)`` where `has_more` is
    an indicator that is `True` if there are more lines in the file.

    f must be opened in binary mode on Python 3.
    """
    avg_line_length = 74
    to_read = n + (offset or 0)

    while 1:
        try:
            # int(): avg_line_length becomes a float after the 1.3 growth
            # below, and seek() rejects float offsets on Python 3
            f.seek(-int(avg_line_length * to_read), 2)
        except IOError:
            # woops.  apparently file is smaller than what we want
            # to step back, go to the beginning instead
            f.seek(0)
        pos = f.tell()
        lines = f.read().splitlines()
        if len(lines) >= to_read or pos == 0:
            return lines[-to_read:offset and -offset or None], \
                len(lines) > to_read or pos > 0
        # estimate was too small: grow it and retry
        avg_line_length *= 1.3

基于S.Lott投票最多的答案(2008年9月25日21:43),但针对小文件进行了修正。

def tail(the_file, lines_2find=20):
    """Return the last `lines_2find` lines of open binary file `the_file`.

    Scans backwards block by block counting b'\n'; reads lines_2find+1
    newlines so the first (possibly partial) scanned line is never returned.

    Fixes vs. the original: read exactly `byte_block` bytes (the original
    read a fixed 1024 in the final short block, re-counting newlines in
    already-scanned data near the file start) and count b'\n' so Python 3
    binary files work.
    """
    the_file.seek(0, 2)                         # go to end of file
    bytes_in_file = the_file.tell()
    lines_found, total_bytes_scanned = 0, 0
    while lines_2find + 1 > lines_found and bytes_in_file > total_bytes_scanned:
        byte_block = min(1024, bytes_in_file - total_bytes_scanned)
        the_file.seek(-(byte_block + total_bytes_scanned), 2)
        total_bytes_scanned += byte_block
        lines_found += the_file.read(byte_block).count(b'\n')
    the_file.seek(-total_bytes_scanned, 2)
    line_list = list(the_file.readlines())
    return line_list[-lines_2find:]


#we read at least 21 line breaks from the bottom, block by block for speed
#21 to ensure we don't get a half line

希望这对你有用。

你可以用 f.seek(0, 2) 跳到文件末尾,然后用下面这个 readline() 的替代函数逐行向后读取:

def readline_backwards(self, f):
backline = ''
last = ''
while not last == '\n':
backline = last + backline
if f.tell() <= 0:
return backline
f.seek(-1, 1)
last = f.read(1)
f.seek(-1, 1)
backline = last
last = ''
while not last == '\n':
backline = last + backline
if f.tell() <= 0:
return backline
f.seek(-1, 1)
last = f.read(1)
f.seek(-1, 1)
f.seek(1, 1)
return backline

基于Eyecue的答案(6月10日10点21:28):这个类添加head()和tail()方法到文件对象。

class File(file):
    """Python 2 only: subclass of the Py2 `file` builtin adding head() and
    tail() convenience methods.

    NOTE(review): relies on the `file` builtin, `self.next()` and `xrange`,
    none of which exist on Python 3 -- this class cannot run there.
    """
    def head(self, lines_2find=1):
        # return the first lines_2find lines from the top of the file
        self.seek(0)                            #Rewind file
        return [self.next() for x in xrange(lines_2find)]


    def tail(self, lines_2find=1):
        # scan backwards in 1024-byte blocks counting newlines
        self.seek(0, 2)                         #go to end of file
        bytes_in_file = self.tell()
        lines_found, total_bytes_scanned = 0, 0
        # lines_2find+1 newlines guarantees the first kept line is complete
        while (lines_2find+1 > lines_found and
               bytes_in_file > total_bytes_scanned):
            byte_block = min(1024, bytes_in_file-total_bytes_scanned)
            self.seek(-(byte_block+total_bytes_scanned), 2)
            total_bytes_scanned += byte_block
            # NOTE(review): read(1024) can overlap bytes already scanned when
            # the final block is short, over-counting newlines -- confirm
            lines_found += self.read(1024).count('\n')
        self.seek(-total_bytes_scanned, 2)
        line_list = list(self.readlines())
        return line_list[-lines_2find:]

用法:

f = File('path/to/file', 'r')
f.head(3)
f.tail(3)

如果文件不以\n结尾,或者不能确保完整的第一行被读取,那么其中一些解决方案就会出现问题。

def tail(file, n=1, bs=1024):
    """Return the last n lines of the file at path `file` as a list of bytes.

    Opens the file in binary mode: Python 3 text files cannot seek relative
    to the end, and newline counting needs b'\n' (the original str version
    crashed on Python 3).  Handles files that do not end in a newline, empty
    files, and closes the handle even on error (via `with`).
    """
    with open(file, 'rb') as f:
        if f.seek(0, 2) == 0:
            return []  # empty file: the original's seek(-1, 2) would raise
        f.seek(-1, 2)
        # if the file doesn't end in \n, count the trailing partial line anyway
        l = 1 - f.read(1).count(b'\n')
        B = f.tell()
        while n >= l and B > 0:
            block = min(bs, B)
            B -= block
            f.seek(B, 0)
            l += f.read(block).count(b'\n')
        f.seek(B, 0)
        l = min(l, n)  # discard first (incomplete) line if l > n
        lines = f.readlines()[-l:]
    return lines

mmap简单快速解决方案:

import mmap
import os


def tail(filename, n):
    """Returns last n lines from the filename as a list of bytes.

    Memory-maps the file and scans backwards for newlines.

    Fixes vs. the original: `fm[i] == '\n'` is always False on Python 3
    (indexing an mmap yields an int there), `xrange` does not exist, the
    MAP_SHARED/PROT_READ arguments are Unix-only (ACCESS_READ is portable),
    and mapping an empty file raises ValueError.
    """
    size = os.path.getsize(filename)
    if size == 0:
        return []  # mmap cannot map a zero-length file
    with open(filename, "rb") as f:
        fm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            for i in range(size - 1, -1, -1):
                # a length-1 slice compares as bytes on both Py2 and Py3
                if fm[i:i + 1] == b'\n':
                    n -= 1
                    if n == -1:
                        break
            return fm[i + 1 if i else 0:].splitlines()
        finally:
            fm.close()

S.Lott上面的答案几乎适用于我,但最终给了我部分的行。事实证明,它破坏了块边界上的数据,因为数据以相反的顺序保存读块。当调用“.join(data)”时,数据块的顺序是错误的。这就解决了这个问题。

def tail(f, window=20):
    """
    Returns the last `window` lines of file `f` as a list.
    f - a byte file-like object

    NOTE(review): as written this is Python 2 code -- it counts '\n' (str)
    in the blocks read from `f`, which fails on Python 3 bytes, while
    Python 3 text files forbid the nonzero end-relative seeks used below.
    A Python 3 port appears further down the page.
    """
    if window == 0:
        return []
    BUFSIZ = 1024
    f.seek(0, 2)
    bytes = f.tell()          # total file size
    size = window + 1         # one extra line: the first kept may be partial
    block = -1
    data = []
    while size > 0 and bytes > 0:
        if bytes - BUFSIZ > 0:
            # Seek back one whole BUFSIZ
            f.seek(block * BUFSIZ, 2)
            # read BUFFER
            data.insert(0, f.read(BUFSIZ))
        else:
            # file too small, start from begining
            f.seek(0,0)
            # only read what was not read
            data.insert(0, f.read(bytes))
        linesFound = data[0].count('\n')
        size -= linesFound
        bytes -= BUFSIZ
        block -= 1
    # insert(0, ...) kept the blocks in file order, so no reversal is needed
    return ''.join(data).splitlines()[-window:]
我发现上面的 Popen 是最好的解决方案,又快又脏,而且很管用。在 Unix 机器上的 Python 2.6 中,我使用了以下代码:

def GetLastNLines(self, n, fileName):
    """
    Name:           Get LastNLines
    Description:        Gets last n lines using Unix tail
    Output:         returns last n lines of a file (bytes on Python 3)
    Keyword argument:
    n -- number of last lines to return
    filename -- Name of the file you need to tail into

    Fixes vs. the original: use the fileName argument (it was ignored in
    favor of a private self.__fileName attribute), pass n as a string, and
    name communicate()'s return correctly -- it is (stdout, stderr), not
    (stdout, stdin).
    """
    p = subprocess.Popen(['tail', '-n', str(n), fileName],
                         stdout=subprocess.PIPE)
    soutput, serr = p.communicate()
    return soutput

Soutput将包含最后n行代码。要逐行迭代soutput,执行以下操作:

# iterate the returned text line by line (print() for Python 3; the
# original's `print line` is Python 2 statement syntax)
for line in GetLastNLines(50, 'myfile.log').split('\n'):
    print(line)

在pypi上有一些现有的tail实现,你可以使用pip安装:

  • mtFileUtil
  • multitail
  • log4tailer
  • ...

根据您的情况,使用这些现有工具中的一种可能有优势。

以下是我的答案。纯python。利用时间,它似乎很快。在一个日志文件(有10万行)中拖尾100行:

>>> timeit.timeit('tail.tail(f, 100, 4098)', 'import tail; f = open("log.txt", "r");', number=10)
0.0014600753784179688
>>> timeit.timeit('tail.tail(f, 100, 4098)', 'import tail; f = open("log.txt", "r");', number=100)
0.00899195671081543
>>> timeit.timeit('tail.tail(f, 100, 4098)', 'import tail; f = open("log.txt", "r");', number=1000)
0.05842900276184082
>>> timeit.timeit('tail.tail(f, 100, 4098)', 'import tail; f = open("log.txt", "r");', number=10000)
0.5394978523254395
>>> timeit.timeit('tail.tail(f, 100, 4098)', 'import tail; f = open("log.txt", "r");', number=100000)
5.377126932144165

代码如下:

import os




def tail(f, lines=1, _buffer=4098):
    """Tail a file and get X lines from the end"""
    found = []
    # how many buffers back from EOF the current probe reaches
    step = -1
    # widen the probe until enough lines are buffered
    while len(found) < lines:
        try:
            f.seek(step * _buffer, os.SEEK_END)
        except IOError:
            # file is too small (or too many lines wanted): take everything
            f.seek(0)
            found = f.readlines()
            break
        found = f.readlines()
        # step one buffer further back for the next attempt
        step -= 1
    return found[-lines:]
我不得不从文件的最后一行读取一个特定的值,偶然发现了这个帖子。我没有在 Python 中重新发明轮子,而是写了一个小型 shell 脚本,保存为 /usr/local/bin/get_last_netp:
#! /bin/bash
# Print field 14 of the last line of the transfer log.
# The awk program is quoted conventionally: the original's  awk {'print $14'}
# left the braces unquoted and only worked by accident of word-splitting.
tail -n1 /home/leif/projects/transfer/export.log | awk '{print $14}'

在Python程序中:

from subprocess import check_output


last_netp = int(check_output("/usr/local/bin/get_last_netp"))

这不是使用deque的第一个例子,而是一个更简单的例子。这个是通用的:它适用于任何可迭代对象,而不仅仅是文件。

#!/usr/bin/env python
import sys
import collections
def tail(iterable, N):
    """Yield the final N items of any iterable (not just files).

    Fix vs. the original: with N == 0 the hand-rolled version called
    popleft() on an empty deque (IndexError); deque's maxlen handles the
    windowing correctly for every N >= 0.
    """
    deq = collections.deque(iterable, maxlen=N)
    yield from deq

if __name__ == '__main__':
    for line in tail(sys.stdin, 10):
        sys.stdout.write(line)
This is my version of tailf


import sys, time, os


filename = 'path to file'


try:
    # binary mode: Python 3 text files cannot seek relative to the end,
    # and the original's `print l` is Python 2 statement syntax
    with open(filename, 'rb') as f:
        size = os.path.getsize(filename)
        # initial backlog: the whole file if small, else the last 999 bytes
        f.seek(-(size if size < 1024 else 999), 2)
        print(f.read())
        # follow mode: poll forever for newly appended lines
        while True:
            line = f.readline()
            if not line:
                time.sleep(1)
                continue
            print(line)
except IOError:
    pass
import time


attemps = 600
wait_sec = 5
fname = "YOUR_PATH"


with open(fname, "r") as f:
    for i in range(attemps):
        # remember where this read started so a miss can rewind to it --
        # the original captured `where` once before the loop, so every
        # rewind went back to the start of the file and re-printed it all
        where = f.tell()
        line = f.readline()
        if not line:
            time.sleep(wait_sec)
            f.seek(where)
        else:
            print(line, end='')  # already has newline

我对类似问题的回答上根据评论者的要求发布一个答案,在那里使用相同的技术来改变文件的最后一行,而不仅仅是得到它。

对于一个非常大的文件,mmap是最好的方法。为了改进现有的mmap答案,这个版本可以在Windows和Linux之间移植,并且应该运行得更快(尽管如果不对文件在GB范围内的32位Python进行一些修改,它将无法工作,请参阅其他关于处理这个问题的提示,以及修改以在Python 2上工作的答案)。

import io  # Gets consistent version of open for both Py2.7 and Py3.x
import itertools
import mmap


def skip_back_lines(mm, numlines, startidx):
    '''Factored out to simplify handling of n and offset'''
    idx = startidx
    # step back over `numlines` newline characters (or stop at -1)
    for _ in range(numlines):
        idx = mm.rfind(b'\n', 0, idx)
        if idx < 0:
            break
    return idx


def tail(f, n, offset=0):
    """Return the last n lines of f (skipping `offset` trailing lines) as a
    list of bytes, each keeping its newline like f.readlines() would."""
    # Reopen in binary so byte offsets from the mmap scan are usable directly
    with io.open(f.name, 'rb') as binf, mmap.mmap(binf.fileno(), 0, access=mmap.ACCESS_READ) as mm:
        # len(mm) - 1 starts just before a trailing newline, if any
        end = skip_back_lines(mm, offset, len(mm) - 1)
        if end < 0:
            # the offset lines alone consumed the whole file
            return []

        stop = end + 1  # slice end excludes the skipped offset lines
        # one newline further back per wanted line; +1 steps past it onto
        # the first character of the earliest kept line
        start = skip_back_lines(mm, n, end) + 1

        # splitlines(True) keeps line endings, mimicking f.readlines()
        return mm[start:stop].splitlines(True)

这假设尾部的行数足够小,您可以安全地将它们全部读入内存;你也可以让它成为一个生成器函数,手动读取一行,通过替换最后一行:

        mm.seek(startofline)
# Call mm.readline n times, or until EOF, whichever comes first
# Python 3.2 and earlier:
for line in itertools.islice(iter(mm.readline, b''), n):
yield line


# 3.3+:
yield from itertools.islice(iter(mm.readline, b''), n)

最后,以二进制模式读取(必须使用mmap),因此它给出str行(Py2)和bytes行(Py3);如果你想要unicode (Py2)或str (Py3),可以调整迭代方法来为你解码和/或修复换行:

        lines = itertools.islice(iter(mm.readline, b''), n)
if f.encoding:  # Decode if the passed file was opened with a specific encoding
lines = (line.decode(f.encoding) for line in lines)
if 'b' not in f.mode:  # Fix line breaks if passed file opened in text mode
lines = (line.replace(os.linesep, '\n') for line in lines)
# Python 3.2 and earlier:
for line in lines:
yield line
# 3.3+:
yield from lines

注意:这些都是我在一台无法使用Python进行测试的机器上输入的。如果我打印了什么,请告诉我;这是类似于我的另一个答案,我认为它应该工作,但调整(例如处理offset)可能会导致微妙的错误。如果有任何错误,请在评论中告诉我。

虽然这对于大文件来说并不是很有效,但这段代码非常简单:

  1. 它读取文件对象f
  2. 它分割使用换行符返回的字符串\n
  3. 它获取数组列表的最后索引,使用负号代表最后索引,:获取子数组

    def tail(f,n):
    return "\n".join(f.read().split("\n")[-n:])
    

这是一个非常简单的实现:

with open('/etc/passwd', 'r') as f:
    try:
        f.seek(0, 2)
        s = ''
        # step back 10 bytes at a time until 11 newlines are in hand
        while s.count('\n') < 11:
            cur = f.tell()
            f.seek((cur - 10))
            s = f.read(10) + s
            f.seek((cur - 10))
        # print() is required on Python 3 (`print s` is Py2 syntax)
        print(s)
    except Exception as e:
        # seeking before the start (small file) lands here; fall back to
        # reading forward
        f.readlines()
将 @papercrane 的解决方案更新到 Python 3。先用 open(filename, 'rb') 打开文件,然后:
def tail(f, window=20):
    """Returns the last `window` lines of binary file `f` as a list of str.

    Fix vs. the original: each 1024-byte block was decoded separately, which
    raises UnicodeDecodeError whenever a multi-byte UTF-8 character straddles
    a block boundary.  Decode once after joining, and drop the (possibly
    partial) first line when the scan stopped mid-file.
    """
    if window == 0:
        return []

    BUFSIZ = 1024
    f.seek(0, 2)
    remaining_bytes = f.tell()
    size = window + 1
    block = -1
    data = []

    while size > 0 and remaining_bytes > 0:
        if remaining_bytes - BUFSIZ > 0:
            # Seek back one whole BUFSIZ
            f.seek(block * BUFSIZ, 2)
            # read BUFFER
            bunch = f.read(BUFSIZ)
        else:
            # file too small, start from beginning
            f.seek(0, 0)
            # only read what was not read
            bunch = f.read(remaining_bytes)

        data.insert(0, bunch)
        # b'\n' never occurs inside a multi-byte UTF-8 sequence, so counting
        # on the raw bytes matches counting on the decoded text
        size -= bunch.count(b'\n')
        remaining_bytes -= BUFSIZ
        block -= 1

    raw = b''.join(data)
    if remaining_bytes > 0:
        # stopped mid-file: the first line is partial and may even begin in
        # the middle of a multi-byte character -- cut it off before decoding
        raw = raw[raw.find(b'\n') + 1:]
    return raw.decode('utf-8').splitlines()[-window:]

一个更简洁的python3兼容版本,它不插入,而是追加&改变:

def tail(f, window=1):
    """
    Returns the last `window` lines of binary file `f`, joined into a
    single bytes object with b'\n'.
    """
    if window == 0:
        return b''
    BUFSIZE = 1024
    f.seek(0, 2)
    pos = f.tell()
    to_find = window + 1  # one extra: the first scanned line may be partial
    pieces = []
    while to_find > 0 and pos > 0:
        start = max(0, pos - BUFSIZE)
        count = min(pos, BUFSIZE)
        f.seek(start)
        piece = f.read(count)
        # pieces accumulate end-of-file first; reversed at join time
        pieces.append(piece)
        to_find -= piece.count(b'\n')
        pos -= count
    whole = b''.join(reversed(pieces))
    return b'\n'.join(whole.splitlines()[-window:])

像这样使用它:

with open(path, 'rb') as f:
last_lines = tail(f, 3).decode('utf-8')
import itertools
fname = 'log.txt'
offset = 5
n = 10
# read once inside a with-block (the original never closed the file); the
# original's reversed negative-stride slice computed the same n lines that
# precede the final `offset` lines, just in two passes
with open(fname) as f:
    all_lines = list(f)
n_last_lines = all_lines[-(offset + n):-offset or None]
abc = "2018-06-16 04:45:18.68"
filename = "abc.txt"
lastline = None  # pre-bind: the original raised NameError when no line matched
with open(filename) as myFile:
    for num, line in enumerate(myFile, 1):
        if abc in line:
            lastline = num
# print() is required on Python 3 (`print "..."` is Py2 syntax)
print("last occurance of work at file is in " + str(lastline))

我发现了一个可能是最简单的方法来找到文件的第一行或最后N行

文件的最后N行(对于示例:N=10)

# last N lines of the file (N must be defined, e.g. N = 10); the original's
# open("xyz.txt",'r") mixed quote styles (SyntaxError) and used Py2 print
file = open("xyz.txt", 'r')
liner = file.readlines()
for ran in range(len(liner) - N, len(liner)):
    print(liner[ran])
file.close()  # the original never closed the handle

文件的前N行(对于示例:N=10)

# first N lines of the file; besides the mismatched quote and Py2 print,
# the original's range(0, N+1) printed N+1 lines instead of N
file = open("xyz.txt", 'r')
liner = file.readlines()
for ran in range(0, N):
    print(liner[ran])
file.close()  # the original never closed the handle

道理很简单:

def tail(fname, nl):
    """Print the last nl lines of the file at path fname."""
    with open(fname) as f:
        data = f.readlines()  # readlines returns a list
    print(''.join(data[-nl:]))

有非常有用的模块可以做到这一点:

# third-party package: pip install file-read-backwards
from file_read_backwards import FileReadBackwards


with FileReadBackwards("/tmp/file", encoding="utf-8") as frb:

    # getting lines by lines starting from the last line up
    for l in frb:
        print(l)

简单:

with open("test.txt") as f:
    data = f.readlines()
tail = data[-2:]
# the original was missing the closing parenthesis here (SyntaxError)
print(''.join(tail))

更新A.Coady给出的答案

适用于python 3

它使用了指数搜索,只会从后面缓冲N行,非常有效。

import time
import os
import sys


def tail(f, n):
    """Return the last n lines of open binary file f.

    Exponential back-off: double the probe distance from EOF until at least
    n lines are buffered, so only the tail of the file is read.

    Fixes vs. the original: real files raise OSError (not ValueError) on a
    negative seek, so small files crashed; the stray debug print(lines) is
    removed; sys.exit on IOError is dropped (let errors propagate).
    """
    assert n >= 0
    pos, lines = n + 1, []

    # start probing from the end of the file
    f.seek(0, os.SEEK_END)

    is_file_small = False

    while len(lines) <= n:
        try:
            f.seek(f.tell() - pos, os.SEEK_SET)
        except (OSError, ValueError):
            # probe reached before the start of the file: read everything
            # (real files raise OSError here, BytesIO raises ValueError)
            f.seek(0, os.SEEK_SET)
            is_file_small = True
        lines = f.readlines()
        if is_file_small:
            break
        pos *= 2

    return lines[-n:]








# follow mode: poll the log every half second and show its last two lines
with open("stream_logs.txt") as f:
    while True:
        time.sleep(0.5)
        print(tail(f, 2))


另一个解决方案

如果你的 TXT 文件内容如下(每行一个词):鼠标、蛇、猫、蜥蜴、狼、狗

那么你可以简单地使用 Python 的数组(切片)索引来反向读取这个文件:

contents = []


def tail(contents, n, filename='file.txt'):
    """Append every line of `filename` to `contents`, then print the lines
    selected by the reversed slice contents[:n:-1].

    With n = -5 that prints the last four lines in reverse order, matching
    the example output below.  `filename` is a new optional parameter (the
    path was hard-coded before); the default keeps old callers working.
    """
    with open(filename) as file:
        for i in file.readlines():
            contents.append(i)

    for i in contents[:n:-1]:
        print(i)


tail(contents, -5)
结果:狗、狼、蜥蜴、猫

最简单的方法是使用deque:

from collections import deque


def tail(filename, n=10):
    """Return a deque containing the last n lines of `filename`.

    deque's maxlen does all the windowing while the file is streamed once.
    """
    with open(filename) as f:
        return deque(f, n)
我遇到过类似的问题,不过我只需要最后一行,所以我写了自己的解决方案:

def get_last_line(filepath):
    """Return the last line of the file at `filepath` as a stripped str
    ('' for an empty file).

    Walks backwards byte by byte from EOF until the previous newline.

    Fixes vs. the original: `text` is bound before the try, so an empty
    file (whose very first seek raises) no longer causes a NameError at the
    return; only OSError is caught, instead of a bare `except Exception`
    that hid that bug.
    """
    text = []
    try:
        with open(filepath, 'rb') as f:
            f.seek(-1, os.SEEK_END)
            text = [f.read(1)]
            while text[-1] != '\n'.encode('utf-8') or len(text) == 1:
                f.seek(-2, os.SEEK_CUR)
                text.append(f.read(1))
    except OSError:
        # seeked past the start of the file: the whole file is one line
        pass
    return ''.join([t.decode('utf-8') for t in text[::-1]]).strip()

该函数返回文件中最后一个字符串
我有一个1.27gb的日志文件,它花了非常非常少的时间找到最后一行(甚至不到半秒)

两种方案基于从文件端计数“\n”,tail1使用内存映射,tail2不使用。速度相似,两者都很快,但mmap版本更快。这两个函数都返回最后n行(从n+1 '\n'到EOF)作为字符串。

import mmap
def tail1(fn, n=5, encoding='utf8'):
    """Return the last n lines of file `fn` as a single stripped string.

    Memory-maps the file and rfind()s n+1 newlines back from the end.

    Fixes vs. the original: when the file has fewer than n lines, rfind
    leaves nn at -1 and `mm[-1:]` returned only the final byte -- fall back
    to the whole file instead; the mapping is now closed, and an empty file
    (which mmap cannot map) returns ''.
    """
    with open(fn) as f:
        if f.seek(0, 2) == 0:
            return ''  # mmap cannot map a zero-length file
        mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            nn = len(mm)
            for _ in range(n + 1):
                nn = mm.rfind(b'\n', 0, nn)
                if nn < 0:
                    nn = 0  # fewer than n lines: keep the whole file
                    break
            return mm[nn:].decode(encoding=encoding).strip()
        finally:
            mm.close()




def tail2(fn, n=5, encoding='utf8'):
    """Return the last n lines of file `fn` as a single stripped string.

    Scans backwards one byte at a time counting newlines -- simple but slow.

    Fixes vs. the original: when the file has n or fewer lines the loop
    used to end with the file position mid-scan, silently dropping the
    leading lines; it also never examined byte 0.
    """
    with open(fn, 'rb') as f:
        size = f.seek(0, 2)
        start = 0  # fall back to the whole file when it has <= n lines
        for i in range(size - 1, -1, -1):
            f.seek(i)
            if f.read(1) == b'\n':
                n -= 1
                if n < 0:
                    start = i + 1  # first byte after the (n+1)th newline
                    break
        f.seek(start)
        return f.read().decode(encoding=encoding).strip()