参考官方案例:https://docs.python.org/zh-cn/3.8/howto/logging-cookbook.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
|
# Adapted from the official Python logging cookbook ("Logging to a single
# file from multiple processes", dictConfig + QueueHandler variant).
# The original paste was garbled (`= =`, truncated config dict); the config
# has been reconstructed from the cookbook example this article cites.
import logging
import logging.config
import logging.handlers
from multiprocessing import Process, Queue
import random
import threading
import time


def logger_thread(q):
    """Consume LogRecords from *q* and dispatch each to its named logger.

    Runs in the main process (in a thread); stops when the ``None``
    sentinel is received.
    """
    while True:
        record = q.get()
        if record is None:  # sentinel: all producers are done
            break
        logger = logging.getLogger(record.name)
        logger.handle(record)


def worker_process(q):
    """Child-process entry point: route all logging into the shared queue.

    Installs a QueueHandler on the root logger, then emits 100 messages
    at random levels through a random selection of named loggers.
    """
    qh = logging.handlers.QueueHandler(q)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(qh)
    levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
              logging.CRITICAL]
    loggers = ['foo', 'foo.bar', 'foo.bar.baz',
               'spam', 'spam.ham', 'spam.ham.eggs']
    for i in range(100):
        lvl = random.choice(levels)
        logger = logging.getLogger(random.choice(loggers))
        logger.log(lvl, 'Message no. %d', i)


if __name__ == '__main__':
    q = Queue()
    # dictConfig schema: console handler plus three file handlers; the
    # 'foo' logger additionally writes to its own file, and ERROR+ records
    # also go to a dedicated errors file.
    d = {
        'version': 1,
        'formatters': {
            'detailed': {
                'class': 'logging.Formatter',
                'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
            }
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'level': 'INFO',
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': 'mplog.log',
                'mode': 'w',
                'formatter': 'detailed',
            },
            'foofile': {
                'class': 'logging.FileHandler',
                'filename': 'mplog-foo.log',
                'mode': 'w',
                'formatter': 'detailed',
            },
            'errors': {
                'class': 'logging.FileHandler',
                'filename': 'mplog-errors.log',
                'mode': 'w',
                'level': 'ERROR',
                'formatter': 'detailed',
            },
        },
        'loggers': {
            'foo': {
                'handlers': ['foofile']
            }
        },
        'root': {
            'level': 'DEBUG',
            'handlers': ['console', 'file', 'errors']
        },
    }
    workers = []
    for i in range(5):
        wp = Process(target=worker_process, name='worker %d' % (i + 1),
                     args=(q,))
        workers.append(wp)
        wp.start()
    logging.config.dictConfig(d)
    lp = threading.Thread(target=logger_thread, args=(q,))
    lp.start()
    # At this point, the main process could do some useful work of its own.
    # Once it's done that, it can wait for the workers to terminate...
    for wp in workers:
        wp.join()
    # And now tell the logging thread to finish up, too.
    q.put(None)
    lp.join()
实战案例:
1、字典形式配置日志
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
|
# Root logger level.  The original article referenced an undefined name
# ``_level`` (NameError on import); define it explicitly so the snippet is
# runnable as-is.
_level = 'INFO'

# Dict-based logging configuration (logging.config.dictConfig schema,
# version 1): a console handler (INFO+) plus a size-rotating file handler,
# both using a single detailed formatter.
log_conf_dict = {
    'version': 1,
    'formatters': {
        'my_formatter': {
            'class': 'logging.Formatter',
            'format': '%(asctime)s %(processName)s(%(process)d) %(threadName)s(%(thread)d) %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'my_formatter',
        },
        'file': {
            # Rotate at 5 MiB, keep up to 60 backups; ``delay`` postpones
            # opening the file until the first record is emitted.
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/log/test.log',
            'maxBytes': 5 * 1024 * 1024,
            'backupCount': 60,
            'mode': 'w',
            'delay': True,
            'formatter': 'my_formatter',
            'encoding': 'utf-8',
            'level': 'INFO',
        },
    },
    'loggers': {
        # Named logger that additionally writes to the rotating file.
        'my_logger': {
            'handlers': ['file']
        }
    },
    'root': {
        'level': _level,
        'handlers': ['console', 'file']
    },
}
2、主进程中开启独立的日志写入监听线程
1
2
3
4
5
|
"""主进程中开启独立的日志写入监听线程""" queue = Queue( - 1 ) logging.config.dictConfig( dict ) log_thread = threading.Thread(target = logger_main, args = (queue,)) log_thread.start() """其他逻辑代码段""" queue.put( None ) log_thread.join() |
日志写入函数
1
2
3
4
5
6
7
8
|
def logger_main(q):
    """Drain log records from *q* and hand each to the root logger.

    Intended to run as a dedicated thread in the main process; returns
    once the ``None`` sentinel is pulled from the queue.
    """
    root = logging.getLogger()
    for record in iter(q.get, None):  # blocks until sentinel arrives
        root.handle(record)
3、子进程中将日志输入QueueHandler日志队列
1
2
3
4
5
6
7
8
|
def child_proc_main(queue, level=logging.DEBUG):
    """Configure logging in a child process to forward records into *queue*.

    Installs a named ``QueueHandler`` as the root logger's only handler so
    every record emitted in this process is shipped to the main process's
    listener thread.

    :param queue: multiprocessing queue shared with the main process.
    :param level: root logger level for this process (default DEBUG).
        The original snippet referenced an undefined global ``level``
        (NameError); it is now an explicit keyword argument.
    """
    qh = logging.handlers.QueueHandler(queue)
    qh.set_name("my_queue_handler")
    root = logging.getLogger()
    # Crucial step: clear inherited handlers *before* adding the queue
    # handler.  Per the original author's note, behavior differs between
    # Windows and Linux in multi-process setups: on Linux (fork) the child
    # copies the parent's logging configuration, which would otherwise
    # cause duplicate output into multiple log files.
    root.handlers.clear()
    root.addHandler(qh)
    root.setLevel(level)
到此这篇关于python logging多进程多线程输出到同一个日志文件的文章就介绍到这了，更多相关python logging日志文件内容请搜索服务器之家以前的文章或继续浏览下面的相关文章，希望大家以后多多支持服务器之家！
原文链接:https://www.cnblogs.com/OnlyDreams/p/15923001.html