System logging with logback

The following jars are required on the classpath:

logback-classic-1.0.3.jar
logback-core-1.0.3.jar
logstash-logback-encoder-1.2.jar
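
If the project is built with Maven, the same libraries can be declared as dependencies instead of copying the jars by hand. A minimal sketch, assuming the usual coordinates for these artifacts (verify group IDs and versions against your repository); note that logback-classic already pulls in logback-core transitively:

<dependency>
	<groupId>ch.qos.logback</groupId>
	<artifactId>logback-classic</artifactId>
	<version>1.0.3</version>
</dependency>
<dependency>
	<groupId>ch.qos.logback</groupId>
	<artifactId>logback-core</artifactId>
	<version>1.0.3</version>
</dependency>
<dependency>
	<groupId>net.logstash.logback</groupId>
	<artifactId>logstash-logback-encoder</artifactId>
	<version>1.2</version>
</dependency>

The full logback.xml used by the application follows.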


<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true">
	<!-- Properties pulled from the Maven pom.xml; this requires build resources (filtering) to be configured in pom.xml -->
    <property scope="system" name="APP_NAME" value="lifeix-payment" />
    <property scope="system" name="APP_VERSION" value="1.0.0-Beta1" />
    <property scope="system" name="APP_ENV" value="development" />
    <property scope="system" name="LOG_DIR" value="/usr/local/tomcat/logs" />
	<property scope="system" name="LOG_ORDER_DIR" value="/usr/local/tomcat/logs/task" />
	<property scope="system" name="LOG_ORDER_NAME" value="lifeix-payment-order" />
   
    <!-- appender for the order-task log -->
	<appender name="ordertask" class="ch.qos.logback.core.rolling.RollingFileAppender" >
		<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
			<level>debug</level>
		</filter>

		<file>${LOG_ORDER_DIR}/${LOG_ORDER_NAME}.log</file>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- rotate the log daily -->
			<fileNamePattern>${LOG_ORDER_DIR}/${LOG_ORDER_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
			<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
				<!-- cap each log file at roughly 2GB -->
				<maxFileSize>2000MB</maxFileSize>
			</timeBasedFileNamingAndTriggeringPolicy>
		</rollingPolicy>

		<!-- log output format -->
		<encoder>
			<pattern>%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n</pattern>
		</encoder>
	</appender>
    
    
	<appender name="ROLLING" class="ch.qos.logback.core.rolling.RollingFileAppender">
		<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
			<level>info</level>
		</filter>

		<file>${LOG_DIR}/${APP_NAME}.log</file>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- rollover daily -->
			<fileNamePattern>${LOG_DIR}/${APP_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
			<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
				<!-- or whenever the file size reaches 2GB -->
				<maxFileSize>2000MB</maxFileSize>
			</timeBasedFileNamingAndTriggeringPolicy>
		</rollingPolicy>
		<encoder>
			<pattern>%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n</pattern>
		</encoder>
	</appender>
	<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
		<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
			<level>info</level>
		</filter>
		<encoder>
			<pattern>%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n</pattern>
		</encoder>
	</appender>

	<!-- logstash appender settings -->
	<appender name="logstash" class="ch.qos.logback.core.rolling.RollingFileAppender">
		<file>${LOG_DIR}/logstash_${APP_NAME}_logback.json</file>
		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<!-- rollover daily -->
			<fileNamePattern>${LOG_DIR}/logstash_${APP_NAME}_logback%d{yyyy-MM-dd}.%i.json</fileNamePattern>
			<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
				<!-- or whenever the file size reaches 2GB -->
				<maxFileSize>2000MB</maxFileSize>
			</timeBasedFileNamingAndTriggeringPolicy>
		</rollingPolicy>
		<!-- the character set is handled by the encoder; LogstashEncoder writes UTF-8 JSON -->
		<encoder class="net.logstash.logback.encoder.LogstashEncoder" />
	</appender>

	<root level="INFO">
		<appender-ref ref="ROLLING" />
		<appender-ref ref="CONSOLE" />
		<appender-ref ref="logstash" />
	</root>
	

	<!-- default log levels for a few specific packages -->
	<logger name="org.kaleidofoundry.core" level="WARN" />
	<logger name="net.rubyeye.xmemcached" level="INFO" />
	<logger name="com.google.code.yanf4j.core" level="ERROR" />
	<logger name="org.apache.mina.filter.logging" level="WARN" />
	<logger name="com.lifeix.pay.api.util.PaymentOrderLog" level="DEBUG" additivity="false">
		<appender-ref ref="ordertask" />
	</logger>
</configuration>
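
The comment at the top of the configuration refers to Maven resource filtering: the property values can be written as ${...} placeholders in logback.xml and substituted at build time. A rough sketch of the corresponding pom.xml fragment, assuming logback.xml sits under src/main/resources (the original project's exact setup is not shown in this post):

<build>
	<resources>
		<resource>
			<directory>src/main/resources</directory>
			<!-- enables ${...} substitution in logback.xml and other resources -->
			<filtering>true</filtering>
		</resource>
	</resources>
</build>

The remainder of this post collects the relevant logback and logstash-logback-encoder source files referenced by this configuration, followed by the PaymentOrderLog helper.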


/**
 * Logback: the reliable, generic, fast and flexible logging framework.
 * Copyright (C) 1999-2011, QOS.ch. All rights reserved.
 *
 * This program and the accompanying materials are dual-licensed under
 * either the terms of the Eclipse Public License v1.0 as published by
 * the Eclipse Foundation
 *
 *   or (per the licensee's choosing)
 *
 * under the terms of the GNU Lesser General Public License version 2.1
 * as published by the Free Software Foundation.
 */
package ch.qos.logback.core.rolling;

import java.io.File;
import java.io.IOException;

import static ch.qos.logback.core.CoreConstants.CODES_URL;
import ch.qos.logback.core.FileAppender;
import ch.qos.logback.core.rolling.helper.CompressionMode;
/**
 * <code>RollingFileAppender</code> extends {@link FileAppender} to backup the
 * log files depending on {@link RollingPolicy} and {@link TriggeringPolicy}.
 * <p>
 * 
 * For more information about this appender, please refer to the online manual
 * at http://logback.qos.ch/manual/appenders.html#RollingFileAppender
 * 
 * @author Heinz Richter
 * @author Ceki G&uuml;lc&uuml;
 */
public class RollingFileAppender<E> extends FileAppender<E> {
  File currentlyActiveFile;
  TriggeringPolicy<E> triggeringPolicy;
  RollingPolicy rollingPolicy;

  public void start() {
    if (triggeringPolicy == null) {
      addWarn("No TriggeringPolicy was set for the RollingFileAppender named "
          + getName());
      addWarn("For more information, please visit "+CODES_URL+"#rfa_no_tp");
      return;
    }

    // we don't want to void existing log files
    if (!append) {
      addWarn("Append mode is mandatory for RollingFileAppender");
      append = true;
    }

    if (rollingPolicy == null) {
      addError("No RollingPolicy was set for the RollingFileAppender named "
          + getName());
      addError("For more information, please visit "+CODES_URL+"rfa_no_rp");
      return;
    }

    if (isPrudent()) {
      if (rawFileProperty() != null) {
        addWarn("Setting \"File\" property to null on account of prudent mode");
        setFile(null);
      }
      if (rollingPolicy.getCompressionMode() != CompressionMode.NONE) {
        addError("Compression is not supported in prudent mode. Aborting");
        return;
      }
    }

    currentlyActiveFile = new File(getFile());
    addInfo("Active log file name: " + getFile());
    super.start();
  }

  @Override
  public void stop() {
    if(rollingPolicy != null) rollingPolicy.stop();
    if(triggeringPolicy != null) triggeringPolicy.stop();
    super.stop();
  }

  @Override
  public void setFile(String file) {
    // http://jira.qos.ch/browse/LBCORE-94
    // allow setting the file name to null if mandated by prudent mode
    if (file != null && ((triggeringPolicy != null) || (rollingPolicy != null))) {
      addError("File property must be set before any triggeringPolicy or rollingPolicy properties");
      addError("Visit "+CODES_URL+"#rfa_file_after for more information");
    }
    super.setFile(file);
  }

  @Override
  public String getFile() {
    return rollingPolicy.getActiveFileName();
  }

  /**
   * Implemented by delegating most of the rollover work to a rolling policy.
   */
  public void rollover() {
    synchronized (lock) {
      // Note: This method needs to be synchronized because it needs exclusive
      // access while it closes and then re-opens the target file.
      //
      // make sure to close the currently active log file! Renaming under Windows
      // does not work for open files.
      this.closeOutputStream();

      try {
        rollingPolicy.rollover();
      } catch (RolloverFailure rf) {
        addWarn("RolloverFailure occurred. Deferring roll-over.");
        // we failed to roll-over, let us not truncate and risk data loss
        this.append = true;
      }

      try {
        // update the currentlyActiveFile
        // http://jira.qos.ch/browse/LBCORE-90
        currentlyActiveFile = new File(rollingPolicy.getActiveFileName());

        // This will also close the file. This is OK since multiple
        // close operations are safe.
        this.openFile(rollingPolicy.getActiveFileName());
      } catch (IOException e) {
        addError("setFile(" + fileName + ", false) call failed.", e);
      }
    }
  }

  /**
   * This method differentiates RollingFileAppender from its super class.
   */
  @Override
  protected void subAppend(E event) {
    // The roll-over check must precede actual writing. This is the
    // only correct behavior for time driven triggers.

    // We need to synchronize on triggeringPolicy so that only one rollover
    // occurs at a time
    synchronized (triggeringPolicy) {
      if (triggeringPolicy.isTriggeringEvent(currentlyActiveFile, event)) {
        rollover();
      }
    }

    super.subAppend(event);
  }

  public RollingPolicy getRollingPolicy() {
    return rollingPolicy;
  }

  public TriggeringPolicy<E> getTriggeringPolicy() {
    return triggeringPolicy;
  }

  /**
   * Sets the rolling policy. In case the 'policy' argument also implements
   * {@link TriggeringPolicy}, then the triggering policy for this appender is
   * automatically set to be the policy argument.
   * 
   * @param policy
   */
  @SuppressWarnings("unchecked")
  public void setRollingPolicy(RollingPolicy policy) {
    rollingPolicy = policy;
    if (rollingPolicy instanceof TriggeringPolicy) {
      triggeringPolicy = (TriggeringPolicy<E>) policy;
    }

  }

  public void setTriggeringPolicy(TriggeringPolicy<E> policy) {
    triggeringPolicy = policy;
    if (policy instanceof RollingPolicy) {
      rollingPolicy = (RollingPolicy) policy;
    }
  }
}
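
The checks in start() and setFile() above imply a specific wiring order when this appender is configured in code rather than via XML: set the file before the policies, give the policy a reference to its parent appender, and start the policy before the appender. A sketch that mirrors the ordertask appender from the configuration above (the class name and the programmatic approach itself are illustrative, not from the original post); in practice the XML configuration does all of this through Joran, the sketch just makes the ordering constraints visible:

import org.slf4j.LoggerFactory;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.rolling.RollingFileAppender;
import ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP;
import ch.qos.logback.core.rolling.TimeBasedRollingPolicy;

public class ProgrammaticRollingSetup {

    public static void main(String[] args) {
        LoggerContext ctx = (LoggerContext) LoggerFactory.getILoggerFactory();

        RollingFileAppender<ILoggingEvent> appender = new RollingFileAppender<ILoggingEvent>();
        appender.setContext(ctx);
        appender.setName("ordertask");
        // the File property must be set before the policies, see setFile() above
        appender.setFile("/usr/local/tomcat/logs/task/lifeix-payment-order.log");

        TimeBasedRollingPolicy<ILoggingEvent> rollingPolicy = new TimeBasedRollingPolicy<ILoggingEvent>();
        rollingPolicy.setContext(ctx);
        rollingPolicy.setParent(appender); // the policy reads the parent's raw file property
        rollingPolicy.setFileNamePattern("/usr/local/tomcat/logs/task/lifeix-payment-order-%d{yyyy-MM-dd}.%i.log");

        SizeAndTimeBasedFNATP<ILoggingEvent> fnatp = new SizeAndTimeBasedFNATP<ILoggingEvent>();
        fnatp.setMaxFileSize("2000MB");
        rollingPolicy.setTimeBasedFileNamingAndTriggeringPolicy(fnatp);
        rollingPolicy.start(); // also wires and starts the FNATP, see TimeBasedRollingPolicy below

        PatternLayoutEncoder encoder = new PatternLayoutEncoder();
        encoder.setContext(ctx);
        encoder.setPattern("%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n");
        encoder.start();

        appender.setEncoder(encoder);
        appender.setRollingPolicy(rollingPolicy); // also registers it as the triggering policy
        appender.start();

        Logger logger = ctx.getLogger("com.lifeix.pay.api.util.PaymentOrderLog");
        logger.setLevel(Level.DEBUG);
        logger.setAdditive(false);
        logger.addAppender(appender);
    }
}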




/**
 * Logback: the reliable, generic, fast and flexible logging framework.
 * Copyright (C) 1999-2011, QOS.ch. All rights reserved.
 *
 * This program and the accompanying materials are dual-licensed under
 * either the terms of the Eclipse Public License v1.0 as published by
 * the Eclipse Foundation
 *
 *   or (per the licensee's choosing)
 *
 * under the terms of the GNU Lesser General Public License version 2.1
 * as published by the Free Software Foundation.
 */
package ch.qos.logback.core.rolling;

import java.io.File;
import java.util.Date;
import java.util.concurrent.Future;

import ch.qos.logback.core.CoreConstants;
import ch.qos.logback.core.rolling.helper.*;

/**
 * <code>TimeBasedRollingPolicy</code> is both easy to configure and quite
 * powerful. It allows the roll over to be made based on time. It is possible to
 * specify that the roll over occur once per day, per week or per month.
 * 
 * <p>For more information, please refer to the online manual at
 * http://logback.qos.ch/manual/appenders.html#TimeBasedRollingPolicy
 * 
 * @author Ceki G&uuml;lc&uuml;
 */
public class TimeBasedRollingPolicy<E> extends RollingPolicyBase implements
    TriggeringPolicy<E> {
  static final String FNP_NOT_SET = "The FileNamePattern option must be set before using TimeBasedRollingPolicy. ";
  static final int INFINITE_HISTORY = 0;

  // WCS: without compression suffix
  FileNamePattern fileNamePatternWCS;

  private Compressor compressor;
  private RenameUtil renameUtil = new RenameUtil();
  Future<?> future;

  private int maxHistory = INFINITE_HISTORY;
  private ArchiveRemover archiveRemover;

  TimeBasedFileNamingAndTriggeringPolicy<E> timeBasedFileNamingAndTriggeringPolicy;

  boolean cleanHistoryOnStart = false;

  public void start() {
    // set the LR for our utility object
    renameUtil.setContext(this.context);

    // find out period from the filename pattern
    if (fileNamePatternStr != null) {
      fileNamePattern = new FileNamePattern(fileNamePatternStr, this.context);
      determineCompressionMode();
    } else {
      addWarn(FNP_NOT_SET);
      addWarn(CoreConstants.SEE_FNP_NOT_SET);
      throw new IllegalStateException(FNP_NOT_SET
          + CoreConstants.SEE_FNP_NOT_SET);
    }

    compressor = new Compressor(compressionMode);
    compressor.setContext(context);

    // wcs : without compression suffix
    fileNamePatternWCS = new FileNamePattern(Compressor.computeFileNameStr_WCS(
            fileNamePatternStr, compressionMode), this.context);

    addInfo("Will use the pattern " + fileNamePatternWCS
        + " for the active file");

     if(compressionMode == CompressionMode.ZIP) {
      String zipEntryFileNamePatternStr = transformFileNamePattern2ZipEntry(fileNamePatternStr);
      zipEntryFileNamePattern = new FileNamePattern(zipEntryFileNamePatternStr, context);
    }

    if (timeBasedFileNamingAndTriggeringPolicy == null) {
      timeBasedFileNamingAndTriggeringPolicy = new DefaultTimeBasedFileNamingAndTriggeringPolicy<E>();
    }
    timeBasedFileNamingAndTriggeringPolicy.setContext(context);
    timeBasedFileNamingAndTriggeringPolicy.setTimeBasedRollingPolicy(this);
    timeBasedFileNamingAndTriggeringPolicy.start();

    // the maxHistory property is given to TimeBasedRollingPolicy instead of to
    // the TimeBasedFileNamingAndTriggeringPolicy. This makes it more convenient
    // for the user at the cost of inconsistency here.
    if (maxHistory != INFINITE_HISTORY) {
      archiveRemover = timeBasedFileNamingAndTriggeringPolicy.getArchiveRemover();
      archiveRemover.setMaxHistory(maxHistory);
      if(cleanHistoryOnStart) {
        addInfo("Cleaning on start up");
        archiveRemover.clean(new Date(timeBasedFileNamingAndTriggeringPolicy.getCurrentTime()));
      }
    }

    super.start();
  }

  private String transformFileNamePattern2ZipEntry(String fileNamePatternStr) {
    String slashified = FileFilterUtil.slashify(fileNamePatternStr);
    return FileFilterUtil.afterLastSlash(slashified);
  }

  public void setTimeBasedFileNamingAndTriggeringPolicy(
      TimeBasedFileNamingAndTriggeringPolicy<E> timeBasedTriggering) {
    this.timeBasedFileNamingAndTriggeringPolicy = timeBasedTriggering;
  }

  public TimeBasedFileNamingAndTriggeringPolicy<E> getTimeBasedFileNamingAndTriggeringPolicy() {
    return timeBasedFileNamingAndTriggeringPolicy;
  }

  public void rollover() throws RolloverFailure {

    // when rollover is called, the elapsed period's file has
    // already been closed. This is a working assumption of this method.

    String elapsedPeriodsFileName = timeBasedFileNamingAndTriggeringPolicy
        .getElapsedPeriodsFileName();

    String elpasedPeriodStem = FileFilterUtil.afterLastSlash(elapsedPeriodsFileName);


    if (compressionMode == CompressionMode.NONE) {
      if (getParentsRawFileProperty() != null) {
        renameUtil.rename(getParentsRawFileProperty(), elapsedPeriodsFileName);
      } // else { nothing to do if CompressionMode == NONE and parentsRawFileProperty == null }
    } else {
      if (getParentsRawFileProperty() == null) {
        future = asyncCompress(elapsedPeriodsFileName, elapsedPeriodsFileName, elpasedPeriodStem);
      } else {
        future = renamedRawAndAsyncCompress(elapsedPeriodsFileName, elpasedPeriodStem);
      }
    }

    if (archiveRemover != null) {
      archiveRemover.clean(new Date(timeBasedFileNamingAndTriggeringPolicy.getCurrentTime()));
    }
  }

  Future asyncCompress(String nameOfFile2Compress, String nameOfCompressedFile, String innerEntryName)
      throws RolloverFailure {
    AsynchronousCompressor ac = new AsynchronousCompressor(compressor);
    return ac.compressAsynchronously(nameOfFile2Compress, nameOfCompressedFile, innerEntryName);
  }

  Future renamedRawAndAsyncCompress(String nameOfCompressedFile, String innerEntryName)
      throws RolloverFailure {
    String parentsRawFile = getParentsRawFileProperty();
    String tmpTarget = parentsRawFile + System.nanoTime() + ".tmp";
    renameUtil.rename(parentsRawFile, tmpTarget);
    return asyncCompress(tmpTarget, nameOfCompressedFile, innerEntryName);
  }

  /**
   * 
   * The active log file is determined by the value of the parent's filename
   * option. However, if the file name is left blank, the active log file
   * equals the file name for the current period as computed by the
   * <b>FileNamePattern</b> option.
   * 
   * <p>The RollingPolicy must know whether it is responsible for changing the
   * name of the active file or not. If the active file name is set by the user
   * via the configuration file, then the RollingPolicy must leave it as it is.
   * If the user does not specify an active file name, then the RollingPolicy
   * generates one.
   * 
   * <p> To be sure that the file name used by the parent class has been
   * generated by the RollingPolicy and not specified by the user, we keep track
   * of the last generated name object and compare its reference to the parent
   * file name. If they match, then the RollingPolicy knows it's responsible for
   * the change of the file name.
   * 
   */
  public String getActiveFileName() {
    String parentsRawFileProperty = getParentsRawFileProperty();
    if (parentsRawFileProperty != null) {
      return parentsRawFileProperty;
    } else {
      return timeBasedFileNamingAndTriggeringPolicy
          .getCurrentPeriodsFileNameWithoutCompressionSuffix();
    }
  }

  public boolean isTriggeringEvent(File activeFile, final E event) {
    return timeBasedFileNamingAndTriggeringPolicy.isTriggeringEvent(activeFile, event);
  }

  /**
   * Get the number of archive files to keep.
   * 
   * @return number of archive files to keep
   */
  public int getMaxHistory() {
    return maxHistory;
  }

  /**
   * Set the maximum number of archive files to keep.
   * 
   * @param maxHistory
   *                number of archive files to keep
   */
  public void setMaxHistory(int maxHistory) {
    this.maxHistory = maxHistory;
  }


  public boolean isCleanHistoryOnStart() {
    return cleanHistoryOnStart;
  }

  /**
   * Should archive removal be attempted on application start up? Default is false.
   * @since 1.0.1
   * @param cleanHistoryOnStart
   */
  public void setCleanHistoryOnStart(boolean cleanHistoryOnStart) {
    this.cleanHistoryOnStart = cleanHistoryOnStart;
  }


  @Override
  public String toString() {
    return "c.q.l.core.rolling.TimeBasedRollingPolicy";
  }
}
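
Note that maxHistory and cleanHistoryOnStart are left at their defaults in the configuration above, so rolled archives are never deleted. If old files should be pruned, the rollingPolicy elements could be extended along these lines (30 days is an arbitrary example value, not from the original configuration):

		<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
			<fileNamePattern>${LOG_DIR}/${APP_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
			<!-- keep at most 30 days of archives -->
			<maxHistory>30</maxHistory>
			<!-- also run the cleanup once when the application starts (available since 1.0.1) -->
			<cleanHistoryOnStart>true</cleanHistoryOnStart>
			<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
				<maxFileSize>2000MB</maxFileSize>
			</timeBasedFileNamingAndTriggeringPolicy>
		</rollingPolicy>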



/**
 * Logback: the reliable, generic, fast and flexible logging framework.
 * Copyright (C) 1999-2011, QOS.ch. All rights reserved.
 *
 * This program and the accompanying materials are dual-licensed under
 * either the terms of the Eclipse Public License v1.0 as published by
 * the Eclipse Foundation
 *
 *   or (per the licensee's choosing)
 *
 * under the terms of the GNU Lesser General Public License version 2.1
 * as published by the Free Software Foundation.
 */
package ch.qos.logback.core.rolling;

import java.io.File;
import java.util.Date;

import ch.qos.logback.core.joran.spi.NoAutoStart;
import ch.qos.logback.core.rolling.helper.CompressionMode;
import ch.qos.logback.core.rolling.helper.FileFilterUtil;
import ch.qos.logback.core.rolling.helper.SizeAndTimeBasedArchiveRemover;
import ch.qos.logback.core.util.FileSize;

@NoAutoStart
public class SizeAndTimeBasedFNATP<E> extends
        TimeBasedFileNamingAndTriggeringPolicyBase<E> {

  int currentPeriodsCounter = 0;
  FileSize maxFileSize;
  String maxFileSizeAsString;

  @Override
  public void start() {
    // we depend on certain fields having been initialized
    // in super.start()
    super.start();

    archiveRemover = new SizeAndTimeBasedArchiveRemover(tbrp.fileNamePattern, rc);
    archiveRemover.setContext(context);

    // we need to get the correct value of currentPeriodsCounter.
    // usually the value is 0, unless the appender or the application
    // is stopped and restarted within the same period
    String regex = tbrp.fileNamePattern.toRegex(dateInCurrentPeriod);
    String stemRegex = FileFilterUtil.afterLastSlash(regex);


    computeCurrentPeriodsHighestCounterValue(stemRegex);

    started = true;
  }

  void computeCurrentPeriodsHighestCounterValue(final String stemRegex) {
    File file = new File(getCurrentPeriodsFileNameWithoutCompressionSuffix());
    File parentDir = file.getParentFile();

    File[] matchingFileArray = FileFilterUtil
            .filesInFolderMatchingStemRegex(parentDir, stemRegex);

    if (matchingFileArray == null || matchingFileArray.length == 0) {
      currentPeriodsCounter = 0;
      return;
    }
    currentPeriodsCounter = FileFilterUtil.findHighestCounter(matchingFileArray, stemRegex);

    // if the parent raw file property is set, or compression is in use, the
    // next counter is the highest counter found plus one
    if (tbrp.getParentsRawFileProperty() != null || (tbrp.compressionMode != CompressionMode.NONE)) {
      // TODO test me
      currentPeriodsCounter++;
    }
  }

  // IMPORTANT: This field can be updated by multiple threads. It follows that
  // its values may *not* be incremented sequentially. However, we don't care
  // about the actual value of the field except that from time to time the
  // expression ((invocationCounter++ & invocationMask) == invocationMask) should be true.
  private int invocationCounter;
  private int invocationMask = 0x1;

  public boolean isTriggeringEvent(File activeFile, final E event) {

    long time = getCurrentTime();
    if (time >= nextCheck) {
      Date dateInElapsedPeriod = dateInCurrentPeriod;
      elapsedPeriodsFileName = tbrp.fileNamePatternWCS
              .convertMultipleArguments(dateInElapsedPeriod, currentPeriodsCounter);
      currentPeriodsCounter = 0;
      setDateInCurrentPeriod(time);
      computeNextCheck();
      return true;
    }

    // for performance reasons, only check the file size once every (invocationMask + 1)
    // invocations; the mask grows up to 0x0F, so at most every 16th call triggers a size check
    if (((++invocationCounter) & invocationMask) != invocationMask) {
      return false;
    }
    if (invocationMask < 0x0F) {
      invocationMask = (invocationMask << 1) + 1;
    }

    if (activeFile.length() >= maxFileSize.getSize()) {
      elapsedPeriodsFileName = tbrp.fileNamePatternWCS
              .convertMultipleArguments(dateInCurrentPeriod, currentPeriodsCounter);
      currentPeriodsCounter++;
      return true;
    }

    return false;
  }

  private String getFileNameIncludingCompressionSuffix(Date date, int counter) {
    return tbrp.fileNamePattern.convertMultipleArguments(
            dateInCurrentPeriod, counter);
  }


  @Override
  public String getCurrentPeriodsFileNameWithoutCompressionSuffix() {
    return tbrp.fileNamePatternWCS.convertMultipleArguments(
            dateInCurrentPeriod, currentPeriodsCounter);
  }

  public String getMaxFileSize() {
    return maxFileSizeAsString;
  }

  public void setMaxFileSize(String maxFileSize) {
    this.maxFileSizeAsString = maxFileSize;
    this.maxFileSize = FileSize.valueOf(maxFileSize);
  }
}



/**
 * Logback: the reliable, generic, fast and flexible logging framework.
 * Copyright (C) 1999-2011, QOS.ch. All rights reserved.
 *
 * This program and the accompanying materials are dual-licensed under
 * either the terms of the Eclipse Public License v1.0 as published by
 * the Eclipse Foundation
 *
 *   or (per the licensee's choosing)
 *
 * under the terms of the GNU Lesser General Public License version 2.1
 * as published by the Free Software Foundation.
 */
package ch.qos.logback.classic.filter;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.filter.Filter;
import ch.qos.logback.core.spi.FilterReply;

/**
 * Filters events below the threshold level.
 * 
 * Events with a level below the specified level are denied,
 * while events with a level equal to or above the specified
 * level return a FilterReply.NEUTRAL result, allowing the rest
 * of the filter chain to process the event.
 * 
 * For more information about filters, please refer to the online manual at
 * http://logback.qos.ch/manual/filters.html#thresholdFilter
 *
 * @author S&eacute;bastien Pennec
 */
public class ThresholdFilter extends Filter<ILoggingEvent> {

  Level level;
  
  @Override
  public FilterReply decide(ILoggingEvent event) {
    if (!isStarted()) {
      return FilterReply.NEUTRAL;
    }
    
    if (event.getLevel().isGreaterOrEqual(level)) {
      return FilterReply.NEUTRAL;
    } else {
      return FilterReply.DENY;
    }
  }
  
  public void setLevel(String level) {
    this.level = Level.toLevel(level);
  }
  
  public void start() {
    if (this.level != null) {
      super.start();
    }
  }
}
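
Two things follow from decide() for the configuration above. First, a ThresholdFilter never raises a logger's effective level: the <level>debug</level> filter on the ordertask appender lets everything through, and DEBUG events only reach that appender because the PaymentOrderLog logger itself is set to DEBUG. Second, attaching the filter in code uses the same setters that Joran calls from XML; a brief sketch (the console appender here is only a stand-in and the rest of its setup is omitted):

import ch.qos.logback.classic.filter.ThresholdFilter;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.ConsoleAppender;

public class FilterWiring {
    public static void main(String[] args) {
        ConsoleAppender<ILoggingEvent> console = new ConsoleAppender<ILoggingEvent>();

        ThresholdFilter filter = new ThresholdFilter();
        filter.setLevel("INFO"); // events below INFO will be denied by decide()
        filter.start();          // start() refuses to start while the level is null
        console.addFilter(filter);
    }
}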



/**
 * Logback: the reliable, generic, fast and flexible logging framework.
 * Copyright (C) 1999-2011, QOS.ch. All rights reserved.
 *
 * This program and the accompanying materials are dual-licensed under
 * either the terms of the Eclipse Public License v1.0 as published by
 * the Eclipse Foundation
 *
 *   or (per the licensee's choosing)
 *
 * under the terms of the GNU Lesser General Public License version 2.1
 * as published by the Free Software Foundation.
 */
package ch.qos.logback.core;

import java.util.Arrays;

import ch.qos.logback.core.joran.spi.ConsoleTarget;
import ch.qos.logback.core.status.Status;
import ch.qos.logback.core.status.WarnStatus;

/**
 * ConsoleAppender appends log events to <code>System.out</code> or
 * <code>System.err</code> using a layout specified by the user. The default
 * target is <code>System.out</code>.
 * 
 * For more information about this appender, please refer to the online manual
 * at http://logback.qos.ch/manual/appenders.html#ConsoleAppender
 * 
 * @author Ceki G&uuml;lc&uuml;
 * @author Tom SH Liu
 * @author Ruediger Dohna
 */

public class ConsoleAppender<E> extends OutputStreamAppender<E> {

  protected ConsoleTarget target = ConsoleTarget.SystemOut;

  /**
   * Sets the value of the <b>Target</b> option. Recognized values are
   * "System.out" and "System.err". Any other value will be ignored.
   */
  public void setTarget(String value) {
    ConsoleTarget t = ConsoleTarget.findByName(value.trim());
    if (t == null) {
      targetWarn(value);
    } else {
      target = t;
    }
  }

  /**
   * Returns the current value of the <b>target</b> property. The default value
   * of the option is "System.out".
   * 
   * See also {@link #setTarget}.
   */
  public String getTarget() {
    return target.getName();
  }

  private void targetWarn(String val) {
    Status status = new WarnStatus("[" + val + "] should be one of "
        + Arrays.toString(ConsoleTarget.values()), this);
    status.add(new WarnStatus(
        "Using previously set target, System.out by default.", this));
    addStatus(status);
  }

  @Override
  public void start() {
    setOutputStream(target.getStream());
    super.start();
  }
}
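
The default target is System.out and the CONSOLE appender in the configuration above does not change it. If console output should go to standard error instead, the target property can be set explicitly; a sketch of the adjusted appender (not part of the original configuration):

	<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
		<!-- recognized values are "System.out" and "System.err"; anything else is ignored with a warning -->
		<target>System.err</target>
		<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
			<level>info</level>
		</filter>
		<encoder>
			<pattern>%-20(%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread]) %-5level %logger{80} - %msg%n</pattern>
		</encoder>
	</appender>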



/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.logstash.logback.encoder;

import static org.apache.commons.io.IOUtils.*;

import java.io.IOException;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.commons.lang.time.FastDateFormat;

import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.classic.spi.IThrowableProxy;
import ch.qos.logback.classic.spi.ThrowableProxyUtil;
import ch.qos.logback.core.Context;
import ch.qos.logback.core.CoreConstants;
import ch.qos.logback.core.encoder.EncoderBase;

import com.fasterxml.jackson.core.JsonGenerator.Feature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class LogstashEncoder extends EncoderBase<ILoggingEvent> {
    
    private static final ObjectMapper MAPPER = new ObjectMapper().configure(Feature.ESCAPE_NON_ASCII, true);
    private static final FastDateFormat ISO_DATETIME_TIME_ZONE_FORMAT_WITH_MILLIS = FastDateFormat.getInstance("yyyy-MM-dd'T'HH:mm:ss.SSSZZ");
    private static final StackTraceElement DEFAULT_CALLER_DATA = new StackTraceElement("", "", "", 0);
    
    private boolean immediateFlush = true;
    
    @Override
    public void doEncode(ILoggingEvent event) throws IOException {
        
        ObjectNode eventNode = MAPPER.createObjectNode();
        eventNode.put("@timestamp", ISO_DATETIME_TIME_ZONE_FORMAT_WITH_MILLIS.format(event.getTimeStamp()));
        eventNode.put("@message", event.getFormattedMessage());
        eventNode.put("@fields", createFields(event));
        
        write(MAPPER.writeValueAsBytes(eventNode), outputStream);
        write(CoreConstants.LINE_SEPARATOR, outputStream);
        
        if (immediateFlush) {
            outputStream.flush();
        }
        
    }
    
    private ObjectNode createFields(ILoggingEvent event) {
        
        ObjectNode fieldsNode = MAPPER.createObjectNode();
        fieldsNode.put("logger_name", event.getLoggerName());
        fieldsNode.put("thread_name", event.getThreadName());
        fieldsNode.put("level", event.getLevel().toString());
        fieldsNode.put("level_value", event.getLevel().toInt());
        
        StackTraceElement callerData = extractCallerData(event);
        fieldsNode.put("caller_class_name", callerData.getClassName());
        fieldsNode.put("caller_method_name", callerData.getMethodName());
        fieldsNode.put("caller_file_name", callerData.getFileName());
        fieldsNode.put("caller_line_number", callerData.getLineNumber());
        
        IThrowableProxy throwableProxy = event.getThrowableProxy();
        if (throwableProxy != null) {
            fieldsNode.put("stack_trace", ThrowableProxyUtil.asString(throwableProxy));
        }
        
        Context context = getContext();
        if (context != null) {
            addPropertiesAsFields(fieldsNode, context.getCopyOfPropertyMap());
        }
        addPropertiesAsFields(fieldsNode, event.getMDCPropertyMap());
        
        return fieldsNode;
        
    }
    
    private void addPropertiesAsFields(final ObjectNode fieldsNode, final Map<String, String> properties) {
        if (properties != null) {
            for (Entry<String, String> entry : properties.entrySet()) {
                String key = entry.getKey();
                String value = entry.getValue();
                fieldsNode.put(key, value);
            }
        }
    }
    
    private StackTraceElement extractCallerData(final ILoggingEvent event) {
        final StackTraceElement[] ste = event.getCallerData();
        if (ste == null || ste.length == 0) {
            return DEFAULT_CALLER_DATA;
        }
        return ste[0];
    }
    
    @Override
    public void close() throws IOException {
        write(LINE_SEPARATOR, outputStream);
    }
    
    public boolean isImmediateFlush() {
        return immediateFlush;
    }
    
    public void setImmediateFlush(boolean immediateFlush) {
        this.immediateFlush = immediateFlush;
    }
    
}
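
Because createFields() copies the logger context properties and the event's MDC map into @fields, request-scoped values put into the MDC become top-level JSON fields that logstash can index directly, alongside @timestamp, @message, logger_name, thread_name, level, level_value and the caller_* fields. A small sketch of that pattern (the class name and the orderId key are illustrative only):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class OrderEventLogger {

    private static final Logger LOG = LoggerFactory.getLogger(OrderEventLogger.class);

    public void accepted(String orderId) {
        MDC.put("orderId", orderId);            // ends up as an "orderId" field under @fields
        try {
            LOG.info("payment order accepted"); // becomes @message in the JSON line
        } finally {
            MDC.remove("orderId");              // always clear request-scoped MDC entries
        }
    }
}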



package com.lifeix.pay.api.util;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Helper for writing payment-order log entries (order information collection log).
 * 
 * @author peter
 * 
 */
public class PaymentOrderLog {

    protected final static Logger LOG = LoggerFactory.getLogger(PaymentOrderLog.class);

    public static void debug(String message) {
        LOG.debug(message);
    }

    public static void info(String message) {
        LOG.info(message);
    }

    public static void warn(String message) {
        LOG.warn(message);
    }

    public static void error(String message) {
        LOG.error(message);
    }

    public static void error(String message, Throwable e) {
        LOG.error(message, e);
    }
}
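
Because the configuration declares the logger com.lifeix.pay.api.util.PaymentOrderLog at DEBUG with additivity="false" and only the ordertask appender attached, everything written through this helper ends up in lifeix-payment-order.log and is not duplicated into the root appenders. A usage sketch (OrderService and chargeGateway are made-up names for illustration):

import com.lifeix.pay.api.util.PaymentOrderLog;

public class OrderService {

    public void settle(String orderId, long amountInCents) {
        // routed only to the "ordertask" appender, i.e. ${LOG_ORDER_DIR}/${LOG_ORDER_NAME}.log
        PaymentOrderLog.debug("settling order " + orderId + ", amount=" + amountInCents);
        try {
            chargeGateway(orderId, amountInCents);
            PaymentOrderLog.info("order " + orderId + " settled");
        } catch (RuntimeException e) {
            PaymentOrderLog.error("settlement failed for order " + orderId, e);
            throw e;
        }
    }

    private void chargeGateway(String orderId, long amountInCents) {
        // placeholder for the real payment gateway call
    }
}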
