How to customize log storage with logback
Author: 吾追无所求
This article walks through customizing where logback stores log entries, including writing them to a database with a custom appender. It is meant as a practical reference; corrections and suggestions for anything missed are welcome.
Custom log storage with logback
1. Configure logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <property name="LOG_HOME" value="/wzwsq-log" />
    <property name="APP_NAME" value="wzwsq" />

    <!-- Console output -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <!-- Output format: %d = date, %thread = thread name, %-5level = level padded to 5 characters,
                 %msg = log message, %n = newline -->
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %X{address} %-5level %logger{50} - %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Roll over to a new log file every day -->
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- Name pattern of the rolled log files -->
            <FileNamePattern>${LOG_HOME}/${APP_NAME}.log.%d{yyyy-MM-dd}.log</FileNamePattern>
            <!-- Number of days of log files to keep -->
            <MaxHistory>10</MaxHistory>
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <!-- Output format: %d = date, %thread = thread name, %-5level = level padded to 5 characters,
                 %msg = log message, %n = newline -->
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %X{address} %-5level %logger{50} - %msg%n</pattern>
            <!--<pattern>%-4relative [%thread] %-5level %logger{35} - %msg%n</pattern> -->
        </encoder>
    </appender>

    <!-- Database connection settings; class points to the custom appender that persists log entries -->
    <appender name="db_classic_mysql_pool" class="wzwsq.config.LogDBAppender">
        <connectionSource class="ch.qos.logback.core.db.DataSourceConnectionSource">
            <dataSource class="org.apache.commons.dbcp.BasicDataSource">
                <driverClassName>com.mysql.jdbc.Driver</driverClassName>
                <!-- "&" must be escaped as "&amp;" inside XML -->
                <url>jdbc:mysql://localhost:3306/wzwsq?serverTimezone=GMT%2B8&amp;useSSL=false&amp;characterEncoding=utf8</url>
                <username>root</username>
                <password>123456</password>
            </dataSource>
        </connectionSource>
    </appender>

    <!-- Log output level -->
    <root level="info">
        <appender-ref ref="STDOUT" />
        <appender-ref ref="FILE" />
        <!-- attach the custom DB appender -->
        <appender-ref ref="db_classic_mysql_pool" />
    </root>
</configuration>
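Both pattern elements above include %X{address}, which renders the value stored under the key address in the SLF4J MDC; if nothing is put there, that part of the line stays empty. Below is a minimal sketch of populating that key (the class name and sample value are illustrative, not part of the original project):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class MdcExample {
    private static final Logger logger = LoggerFactory.getLogger(MdcExample.class);

    public static void main(String[] args) {
        // The key must match the one referenced by %X{address} in the pattern
        MDC.put("address", "192.168.0.10");
        try {
            logger.info("this line now carries the address value");
        } finally {
            // MDC is thread-bound; clean up because server threads are pooled
            MDC.remove("address");
        }
    }
}

In a web application the same put/remove pair would typically wrap each request, for example in a servlet filter or an interceptor.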
2. Write the custom log appender class
import ch.qos.logback.classic.spi.CallerData;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.db.DBAppenderBase;
import com.alibaba.fastjson.JSONObject;
import wzwsq.model.IpInfo;      // project-specific IP info object
import wzwsq.model.UsersModel;  // project-specific user object
import wzwsq.util.Constant;     // project-specific constants
import wzwsq.util.WebUtils;     // project-specific web utilities
import org.springframework.context.annotation.Configuration;

import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;

/**
 * @ClassName LogDBAppender
 * @Description: Custom appender that saves log entries to the database
 * @Author wzwsq
 * @Date 2020/12/10
 * @Version V1.0
 **/
@Configuration
public class LogDBAppender extends DBAppenderBase<ILoggingEvent> {
    protected static final Method GET_GENERATED_KEYS_METHOD;
    // insert statement
    protected String insertSQL;
    /**
     * Custom columns:
     * menu_type:         operation type (menu ID)
     * record_id:         ID of the affected record
     * operation_content: free-text description of the operation
     * add_id:            ID of the operator
     * add_time:          time of the operation
     * ip:                client IP
     * city:              city resolved from the IP
     * ua:                browser user agent
     */
    static final int MENU_TYPE = 1;
    static final int RECORD_ID = 2;
    static final int OPERATION_CONTENT = 3;
    static final int ADD_ID = 4;
    static final int ADD_TIME = 5;
    static final int IP = 6;
    static final int CITY = 7;
    static final int UA = 8;

    static final StackTraceElement EMPTY_CALLER_DATA = CallerData.naInstance();

    static {
        // PreparedStatement.getGeneratedKeys() was added in JDK 1.4
        Method getGeneratedKeysMethod;
        try {
            getGeneratedKeysMethod = PreparedStatement.class.getMethod("getGeneratedKeys", (Class[]) null);
        } catch (Exception ex) {
            getGeneratedKeysMethod = null;
        }
        GET_GENERATED_KEYS_METHOD = getGeneratedKeysMethod;
    }

    @Override
    public void start() {
        // assign the insert statement before starting the appender
        insertSQL = buildInsertSQL();
        super.start();
    }

    // build the insert statement for the custom table
    private static String buildInsertSQL() {
        return "INSERT INTO `operation_log`" + "("
                + "`menu_type`,`record_id`,"
                + "`operation_content`,`add_id`,"
                + "`add_time`,`ip`,"
                + "`city`,`ua`" + ")"
                + "VALUES (?,?,?,?,?,?,?,?)";
    }

    @Override
    protected Method getGeneratedKeysMethod() {
        return GET_GENERATED_KEYS_METHOD;
    }

    @Override
    protected String getInsertSQL() {
        return insertSQL;
    }

    /**
     * The method that does the actual work.
     *
     * @param stmt
     * @param event
     * @throws SQLException
     */
    private void bindLoggingEventWithInsertStatement(PreparedStatement stmt, ILoggingEvent event) throws SQLException {
        // event.getFormattedMessage() returns the rendered log message
        String message = event.getFormattedMessage();
        // To persist only log entries you write yourself, log them in this form:
        // logger.info("'MENU_TYPE': '{}','RECORD_ID': '{}','OPERATION_CONTENT': '{}'", XXX, XXX, XXX)
        // Check whether the current message is one of these custom entries
        int MENU_TYPE_FLAG = message.indexOf("MENU_TYPE");
        int RECORD_ID_FLAG = message.indexOf("RECORD_ID");
        int OPERATION_CONTENT_FLAG = message.indexOf("OPERATION_CONTENT");
        if (MENU_TYPE_FLAG > 0 && RECORD_ID_FLAG > 0 && OPERATION_CONTENT_FLAG > 0) {
            // extract the user-defined fields from the message
            JSONObject jsonObject = JSONObject.parseObject(message);
            String menuType = jsonObject.get("MENU_TYPE").toString();
            String recordId = jsonObject.get("RECORD_ID").toString();
            String operationContent = jsonObject.get("OPERATION_CONTENT").toString();
            // fetch the current user, IP, city and user agent
            UsersModel usersModel = WebUtils.getUser(); // logged-in user
            IpInfo ipInfo = (IpInfo) WebUtils.getSession().getAttribute(Constant.IP_INFO); // IP info captured at login
            String ip = ipInfo.getIp();
            String city = ipInfo.getCity();
            String ua = ipInfo.getUa();
            stmt.setString(MENU_TYPE, menuType);
            stmt.setString(RECORD_ID, recordId);
            stmt.setString(OPERATION_CONTENT, operationContent);
            stmt.setString(ADD_ID, usersModel.getId().toString());
            stmt.setTimestamp(ADD_TIME, new Timestamp(event.getTimeStamp()));
            stmt.setString(IP, ip);
            stmt.setString(CITY, city == null ? "" : city);
            stmt.setString(UA, ua == null ? "" : ua);
        }
    }

    @Override
    protected void subAppend(ILoggingEvent eventObject, Connection connection, PreparedStatement statement) throws Throwable {
        bindLoggingEventWithInsertStatement(statement, eventObject);
        // This is expensive... should we do it every time?
        int updateCount = statement.executeUpdate();
        if (updateCount != 1) {
            addWarn("Failed to insert loggingEvent");
        }
    }

    @Override
    protected void secondarySubAppend(ILoggingEvent eventObject, Connection connection, long eventId) throws Throwable {
    }
}
3. How to call it
private static Logger logger = LoggerFactory.getLogger(UsersController.class);

logger.info("'MENU_TYPE': '{}','RECORD_ID': '{}','OPERATION_CONTENT': '{}'", XXX, XXX, XXX);
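For illustration, here is a sketch of what such a call can look like inside a controller method. The class, method, and argument values are hypothetical; what matters is that the message carries the three keys (MENU_TYPE, RECORD_ID, OPERATION_CONTENT) that LogDBAppender extracts, while the remaining columns (operator, time, IP, city, UA) are filled in by the appender itself:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class UsersController {
    private static final Logger logger = LoggerFactory.getLogger(UsersController.class);

    public void updateUser(long userId) {
        // ... business logic ...
        // Keys must match the ones LogDBAppender looks for in the formatted message
        logger.info("'MENU_TYPE': '{}','RECORD_ID': '{}','OPERATION_CONTENT': '{}'",
                10,                       // hypothetical menu ID for "user management"
                userId,                   // ID of the record that was changed
                "updated user profile");  // free-text description of the operation
    }
}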
Note: in logback.xml the appender elements must be declared before the root element, otherwise the appender-ref entries cannot resolve them by name.
Using logback for system logging
Maven dependencies:

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.7.25</version>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-core</artifactId>
    <version>1.1.11</version>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-classic</artifactId>
    <version>1.1.11</version>
</dependency>
logback-spring.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <property name="log.base" value="/data1/logs/applogs/dt-mapping-api" />

    <appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%date %.-5level %class{100} ----------->> %msg%n</pattern>
        </encoder>
    </appender>

    <appender name="logfile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.base}/default.log</file>
        <!-- Directory and file-name pattern of the rolled files, e.g. logs/mylog-2017-06-31.0.log.zip -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.base}.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
            <!-- When rolling daily, keep at most 30 days of history; older files are removed -->
            <maxHistory>30</maxHistory>
            <!-- Total size of retained logs is capped at 10 GB -->
            <totalSizeCap>10 GB</totalSizeCap>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <!-- A file is rolled and compressed once it reaches 128 MB -->
                <maxFileSize>128 MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>%date [%thread] %.-5level %class{25} - %msg%n</pattern>
        </encoder>
    </appender>

    <appender name="errorfile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.base}/error.log</file>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!-- Directory and file-name pattern of the rolled files, e.g. logs/mylog-2017-06-31.0.log.zip -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.base}/error.%d{yyyy-MM-dd}.%i.zip</fileNamePattern>
            <!-- When rolling daily, keep at most 30 days of history; older files are removed -->
            <maxHistory>30</maxHistory>
            <!-- Total size of retained logs is capped at 10 GB -->
            <totalSizeCap>10 GB</totalSizeCap>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <!-- A file is rolled and compressed once it reaches 128 MB -->
                <maxFileSize>128 MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>%date [%thread] %.-5level %class{25} - %msg%n</pattern>
        </encoder>
    </appender>

    <logger name="com.netflix.curator" level="OFF" />

    <root level="DEBUG">
        <appender-ref ref="errorfile" />
        <appender-ref ref="logfile" />
        <appender-ref ref="stdout" />
    </root>
</configuration>
private final static Logger logger = LoggerFactory.getLogger(Application.class);

logger.info("Batch number: {}", 111111111111L);
logger.error("xxx failed: {}", e);
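To make the routing concrete, here is a small sketch (class name hypothetical) of where each level ends up under the configuration above:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class RoutingDemo {
    private static final Logger logger = LoggerFactory.getLogger(RoutingDemo.class);

    public static void main(String[] args) {
        // root level is DEBUG, so all of these reach stdout and default.log
        logger.debug("debug details");
        logger.info("normal progress message");
        // only ERROR passes the LevelFilter on the errorfile appender,
        // so this line is additionally written to error.log
        logger.error("something went wrong", new IllegalStateException("example"));
    }
}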
The above is based on personal experience. I hope it serves as a useful reference, and thank you for supporting 脚本之家.