GIS

GIS库JTS例子

JTS库依赖

1
2
3
4
5
<dependency>
    <groupId>com.vividsolutions</groupId>
    <artifactId>jts</artifactId>
    <version>1.13</version>
</dependency>

判断点是否在多边形内

1
2
3
4
5
6
7
8
9
GeometryFactory factory = new GeometryFactory();
// 定义多边形
LinearRing shell = factory.createLinearRing(cs);
Polygon polygon = factory.createPolygon(shell);
// Polygon polygon = factory.createPolygon(shell, holes);// holes 是 LinearRing 的数组
// 定义点
Coordinate coordinate = new Coordinate(lon, lat);
// 判断是否在多边形内
boolean contains = SimplePointInAreaLocator.containsPointInPolygon(coordinate, polygon);

抽稀算法(道格拉斯-普克算法 Douglas–Peucker algorithm)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
 
double lon1 = 116.556408D;
double lat1 = 34.012041D;
Coordinate[] coordinates = new Coordinate[]{
  new Coordinate(lon1, lat1),
  ...
};
CoordinateSequence coordinateSequence = new CoordinateArraySequence(coordinates);
//数据精度
PrecisionModel precisionModel = new PrecisionModel(PrecisionModel.FLOATING_SINGLE);
GeometryFactory factory = new GeometryFactory(precisionModel, 4326);
LineString lineString = new LineString(coordinateSequence, factory);
 
DouglasPeuckerSimplifier simplifier = new DouglasPeuckerSimplifier(lineString);
// 抽稀容差,比如抽稀距离20m换算成经纬度约为20/110000≈0.00018度
simplifier.setDistanceTolerance(0.0001D);
Geometry geometry = simplifier.getResultGeometry();
// 抽稀后的结果
Coordinate[] result = geometry.getCoordinates();
音视频

海康威视RTSP流地址

RTSP流格式

rtsp://[username]:[password]@[ip]:[port]/[codec]/[channel]/[subtype]/av_stream

说明:

参数 说明 示例
username 用户名 如admin
password 密码 123456
ip 设备IP 192.168.1.1
port 端口号,默认为554,不填写默认 554
codec 编码 h264,MPEG-4,mpeg4等
channel 通道号,起始为1,通道1则为ch1 ch1
subtype 码流类型,主码流为main,辅码流为sub main

推流

1
2
3
# ffmpeg -i "rtsp://username:password@192.168.1.1:554/h264/ch1/sub/av_stream" -vcodec copy -preset:v ultrafast -tune:v zerolatency -acodec copy -f flv  -an "rtmp://192.168.1.1/live/haikang_01"
ffmpeg version 4.2.1 Copyright (c) 2000-2019 the FFmpeg developers
  built with Apple clang version 11.0.0 (clang-1100.0.33.8)
音视频

流媒体服务器nginx-rtmp安装

下载源代码

1
2
3
# git clone git@github.com:arut/nginx-rtmp-module.git
git clone git@github.com:winshining/nginx-http-flv-module.git
axel -n 100 http://nginx.org/download/nginx-1.17.5.tar.gz

安装依赖

1
2
3
4
5
6
7
8
9
## ubuntu
sudo apt-get install openssl libssl-dev
sudo apt-get install libpcre3 libpcre3-dev
sudo apt-get install zlib1g-dev
 
## centos
sudo yum install -y pcre pcre-devel
sudo yum install -y openssl openssl-devel
sudo yum install -y zlib-devel zlib

编译安装

1
2
3
tar zxvf nginx-1.17.5.tar.gz
cd nginx-1.17.5/
 ./configure --prefix=/usr/local/nginx --add-module=/home/work/nginx-http-flv-module --with-http_ssl_module --with-debug

配置nginx用户

1
2
3
sudo useradd nginx
## sudo vim /etc/passwd
## nginx:x:1001:1001:,,,:/home/nginx:/usr/sbin/nologin

创建相关目录

1
2
3
4
5
6
7
8
# 创建相关目录并修改所有者
sudo mkdir -p /usr/local/nginx/data/dash/live
sudo mkdir -p /usr/local/nginx/data/hls/live
sudo mkdir -p /usr/local/nginx/stat
sudo cp /home/work/nginx-http-flv-module/stat.xsl /usr/local/nginx/stat/
 
sudo chown -R nginx /usr/local/nginx/data
sudo chown -R nginx /usr/local/nginx/stat

修改配置文件

见附录:示例

启动nginx服务器

1
2
3
 .
 
sudo /usr/local/nginx/sbin/nginx

测试

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
.
 
# 推流
ffmpeg -re -i /Applications/ambari-vagrant/ubuntu18.4/data/fulankelin-hd.mp4 -c copy -f flv rtmp://u1802/live/fulankelin-hd
 
# 支持播放地址
rtmp://u1802/live/fulankelin-hd
 
http://u1802/live?app=live&stream=fulankelin-hd
http://u1802/live?port=1935&app=live&stream=fulankelin-hd
http://u1802/live/fulankelin-hd.flv
http://u1802/live/fulankelin-hd.flv?port=1935
 
http://u1802/live/fulankelin-hd.mpd
http://u1802/live/fulankelin-hd.m3u8
 
.

配置示例

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
user  nginx;
worker_processes  1;
 
 
# error_log  logs/error.log;
# error_log  logs/error.log  notice;
error_log  logs/error.log  debug;
 
pid        logs/nginx.pid;
 
 
events {
    worker_connections  4096;
}
 
http {
    include       mime.types;
    default_type  application/octet-stream;
 
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
 
    access_log  logs/access.log  main;
 
    sendfile        on;
    #tcp_nopush     on;
 
    #keepalive_timeout  0;
    keepalive_timeout  65;
 
    #gzip  on;
 
    server {
        listen       80;
        server_name  localhost;
 
        #charset koi8-r;
 
        #access_log  logs/host.access.log  main;
 
        location / {
            root   html;
            index  index.html index.htm;
        }
 
        #error_page  404              /404.html;
 
        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
 
        # location ~* \.(m3u8)$ {
        #     types {
        #         application/vnd.apple.mpegurl m3u8;
        #         video/mp2t ts;
        #     }
 
        #     root /usr/local/nginx/data;
        #     add_header 'Cache-Control' 'no-cache';
        # }
        location /live {
            flv_live on; # 打开http播放flv直播流的方式
            chunked_transfer_encoding on; # 支持Transfer-Encoding: chunked方式回复
 
            add_header 'Access-Control-Allow-Origin' '*';
            add_header 'Access-Control-Allow-Credentials' 'true';
        }
 
        location ~ \.(mpd|m4a|m4v)$ {
            root /usr/local/nginx/data/dash/;
            add_header 'Cache-Control' 'no-cache';
        }
        # }
        location ~ \.(m3u8|ts)$ {
            types {
                application/vnd.apple.mpegurl m3u8;
                video/mp2t ts;
            }
 
            root /usr/local/nginx/data/hls/;
            add_header 'Cache-Control' 'no-cache';
        }
 
        location ~ \.(flv)$ {
            rewrite ^/(.*)/(.*)\.(flv)$ /$1?app=$1&stream=$2 last;
        }
 
        location /stat {
            rtmp_stat all;
            rtmp_stat_stylesheet stat.xsl;
        }
        location /stat.xsl {
            root /usr/local/nginx/stat/;
        }
 
        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}
 
        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}
 
        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }
 
 
    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    #    listen       8000;
    #    listen       somename:8080;
    #    server_name  somename  alias  another.alias;
 
    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}
 
 
    # HTTPS server
    #
    #server {
    #    listen       443 ssl;
    #    server_name  localhost;
 
    #    ssl_certificate      cert.pem;
    #    ssl_certificate_key  cert.key;
 
    #    ssl_session_cache    shared:SSL:1m;
    #    ssl_session_timeout  5m;
 
    #    ssl_ciphers  HIGH:!aNULL:!MD5;
    #    ssl_prefer_server_ciphers  on;
 
    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}
 
}
 
rtmp_auto_push on;
rtmp_auto_push_reconnect 1s;
rtmp_socket_dir /tmp;
 
rtmp {
    out_queue           4096;
    out_cork            8;
    max_streams         128;
    timeout             15s;
    drop_idle_publisher 15s;
 
    log_interval 5s; #log模块在access.log中记录日志的间隔时间,对调试非常有用
    log_size     1m; #log模块用来记录日志的缓冲区大小
 
    server {
        listen 1935;
        on_connect http://127.0.0.1:3000/on_connect;
 
        application live {
            live on;
            hls on;
            hls_path /usr/local/nginx/data/hls/live;
            dash on;
            dash_path /usr/local/nginx/data/dash/live;
            gop_cache on; #打开GOP缓存,减少首屏等待时间
 
            notify_update_timeout 30s;
            notify_relay_redirect off; # 启用本地流重定向on_play和on_publish远程重定向。新的流名称是用于远程重定向的RTMP URL的MD5哈希。默认为关闭。
            notify_update_strict off; # 切换on_update回调的严格模式。默认为关闭。打开所有连接错误后,超时以及HTTP解析错误和空响应均被视为更新失败并导致连接终止。
            notify_method get;
 
            on_play http://127.0.0.1:3000/on_play;
            on_publish http://127.0.0.1:3000/on_publish;
            on_done http://127.0.0.1:3000/on_done;
            on_play_done http://127.0.0.1:3000/on_play_done;
            on_publish_done http://127.0.0.1:3000/on_publish_done;
            on_record_done http://127.0.0.1:3000/on_record_done;
            on_update http://127.0.0.1:3000/on_update;
 
        }
    }
}

问题处理

1
2
3
4
5
6
7
8
9
10
11
启动报错
nginx: [warn] 4096 worker_connections exceed open file resource limit: 1024
 
# ulimit -n 65535
 
vim /etc/security/limits.conf
* soft nofile 65535
* hard nofile 65535
 
vim /etc/sysctl.conf 
fs.file-max = 6553560
ElasticSearch

elasticsearch增加磁盘

基于ElasticSearch-6.2.4

检查系统状态

1
2
3
curl -XGET "http://192.168.1.1:9002/_cat/indices"
curl -XGET "http://192.168.1.1:9002/_cat/nodes"
curl -XGET "http://192.168.1.1:9002/_cat/health"

修改配置文件,增加数据目录,多个以逗号分隔

1
2
# vim config/elasticsearch.yml
path.data: /data2/es/data,/data1/es/data

检查系统状态

1
2
3
curl -XGET "http://192.168.1.1:9002/_cat/indices"
curl -XGET "http://192.168.1.1:9002/_cat/nodes"
curl -XGET "http://192.168.1.1:9002/_cat/health"

为避免节点重启造成分片重新自动分配,需要设置集群中分片不自动分配

1
2
3
4
5
6
7
8
9
curl -X PUT \
  http://192.168.1.1:9002/_cluster/settings \
  -H 'Content-Type: application/json; charset=UTF-8' \
  -H 'cache-control: no-cache' \
  -d '{
  "transient" : {
      "cluster.routing.allocation.enable" : "none"
  }
}'

关闭节点

1
2
jps -l
kill xxxx

启动服务

1
./bin/elasticsearch -d

开启分片自动分配

1
2
3
4
5
6
7
8
9
curl -X PUT \
  http://192.168.1.1:9002/_cluster/settings \
  -H 'Content-Type: application/json; charset=UTF-8' \
  -H 'cache-control: no-cache' \
  -d '{
  "transient": {
    "cluster.routing.allocation.enable": "all"
  }
}'

查看分片及索引状态

1
2
3
4
5
# 查看未分配分片数
curl -XGET 'http://192.168.1.1:9002/_cat/shards' | grep UNASSIGNED   #查看未分配的索引分片
curl -XGET "http://192.168.1.1:9002/_cat/shards/lbs_geocoder_4307?v" #查看索引分片
curl -XGET "http://192.168.1.1:9002/_cat/shards?v"
curl -XGET "http://192.168.1.1:9002/_cat/shards?v" |grep UNASSIGNED |wc -l

当没有未分配分片时,检查系统状态

1
2
3
curl -XGET "http://192.168.1.1:9002/_cat/indices"
curl -XGET "http://192.168.1.1:9002/_cat/nodes"
curl -XGET "http://192.168.1.1:9002/_cat/health"

因为负载过高等原因,有时候个别分片可能长期处于 UNASSIGNED 状态时,可以手动进行分配;reroute 接口支持五种指令:allocate_replica, allocate_stale_primary, allocate_empty_primary,move 和 cancel;默认情况下只允许手动分配副本分片(即使用 allocate_replica),所以如果要分配主分片,需要单独加一个 accept_data_loss 选项

分配主分片

1
2
3
4
5
6
7
8
9
10
11
12
13
curl -X PUT \
  http://192.168.1.1:9002/_cluster/reroute \
  -H 'Content-Type: application/json; charset=UTF-8' \
  -H 'cache-control: no-cache' \
  -d '{
  "commands" : [ {
        "allocate_stale_primary" :
            {
              "index" : "index", "shard" : 4, "node" : "node56", "accept_data_loss" : true
            }
        }
  ]
}'

分配副本

1
2
3
4
5
6
7
8
9
10
11
12
13
curl -X PUT \
  http://192.168.1.1:9002/_cluster/reroute \
  -H 'Content-Type: application/json; charset=UTF-8' \
  -H 'cache-control: no-cache' \
  -d '{
  "commands" : [ {
        "allocate_replica" :
            {
              "index" : "index", "shard" : 4, "node" : "node56"
            }
        }
  ]
}'

磁盘空间不足报 [FORBIDDEN/12/index read-only / allow delete (api)] 错误后,进行增加磁盘操作或删除无用旧索引数据,完成操作需要修改索引的状态,ES不会主动更新,命令为:

1
PUT _settings { "index":{ "blocks":{ "read_only_allow_delete":"false" } } }

磁盘扩容后,可能需要修改报警的设置,默认值会在 85% 90% 95%的磁盘空间时报警

1
2
3
4
5
6
7
8
9
10
PUT /_cluster/settings
{
  "transient": {
    "cluster.routing.allocation.disk.threshold_enabled" : true,
    "cluster.routing.allocation.disk.watermark.low": "80gb",
    "cluster.routing.allocation.disk.watermark.high": "40gb",
    "cluster.routing.allocation.disk.watermark.flood_stage":"20gb",
    "cluster.info.update.interval" : "1m"
  }
}
ElasticSearch

标准日志打印

技术栈:
Spring Cloud Sleuth:日志跟踪工具,可应用于计划任务、多线程及复杂的web请求,可与Logbook、SLF4J轻松集成,通过添加traceId、spanId等标识符来进行日志跟踪和诊断问题

依赖管理

父工程:pom.xml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
<dependencies>
    <!--log-->
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-api</artifactId>
    </dependency>
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>jcl-over-slf4j</artifactId>
    </dependency>
    <dependency>
        <groupId>ch.qos.logback</groupId>
        <artifactId>logback-classic</artifactId>
        <version>${logback.version}</version>
    </dependency>
    <!-- 同时使用了log4j -->
    <dependency>
        <groupId>log4j</groupId>
        <artifactId>log4j</artifactId>
    </dependency>
</dependencies>
<dependencyManagement>
    <dependencies>
        <!--log-->
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>${slf4j-api.version}</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>jcl-over-slf4j</artifactId>
            <version>${jcl-over-slf4j.version}</version>
        </dependency>
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>${logback.version}</version>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>commons-logging</groupId>
            <artifactId>commons-logging</artifactId>
            <version>${commons-logging.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>${log4j.version}</version>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
            <version>${commons-io.version}</version>
        </dependency>
    </dependencies>
</dependencyManagement>

当前工程:pom.xml

1
2
3
4
5
6
7
8
9
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-sleuth</artifactId>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-classic</artifactId>
    <scope>compile</scope>
</dependency>

日志规范

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
1.日志配置文件规范:classpath: logback-spring.xml
2.切面日志命名规范:
当前日志:
${spring.zipkin.service.name:-${spring.application.name:-}}.json
历史日志:
${spring.zipkin.service.name:-${spring.application.name:-}}.json.%d{yyyy-MM-dd}.gz
日志模板:%msg%n
编码方式:UTF-8
举例:
{"timeStamp":1561346540062,"sTime":"2019-06-24 11:22:20","eTime":"2019-06-24 11:22:20","host":"192,168.1.1","pid":"1044","serviceName":"xxxx-server","className":"com.xxxx.controller.XxxxController","methodName":"xx","traceId":"920f90a0e06ad641","duration":5,"url":"/xxxx/xx","fromHost":"192,168.1.2","inParam":{"req":{"appId":"A0001","name":"guoguo","caller":"guoguo","noise":"1526874235603","sign":"MCwCFAzrG9KBMlrA0iaV1eG9vc465I9ZAhRt3LpFOkERQIRXoQuPfncD5fz2mg\u003d\u003d","timestamp":1561346539982,"version":"1.0"}},"outParam":{"code":0,"msg":"OK","sign":"","result":{"sign":"23247d2446130e0f6d1f5877cc9df8a7d567d69f15248b3d94436c6da020ab3f","timestamp":1561346540064}}}
 
3.普通日志
文件按天分开存储
${spring.zipkin.service.name:-${spring.application.name:-}}.(warn|info|debug|warn).log.%d{yyyy-MM-dd}
日志模板:
LOG_TRACK_PATTERN:${spring.zipkin.service.name:-${spring.application.name:-}},%X{X-B3-TraceId:-},%X{X-B3-SpanId:-},%X{X-Span-Export:-}
FILE_LOG_PATTERN:%d{yyyy-MM-dd HH:mm:ss.SSS} %-4level [${LOG_TRACK_PATTERN:-,,,}] %localip ${PID:- } [%thread] [%logger{36}] - %msg%n
编码方式:UTF-8
举例:
3010301001  xxxx_xxxx   2019-06-22 02:38:13.718 1   {"name":"tianjiaguo","age":20}  2aafc9ea86934c2aa703bce492020e1c    0  
3
 
4.大数据日志
文件名按小时分隔
${spring.zipkin.service.name:-${spring.application.name:-}}.bigdata.log.%d{yyyyMMddHH}
日志模板:%msg%n
编码方式:UTF-8

添加应用名称来标识应用程序的日志

添加bootstrap.yml配置文件,spring.application.name或spring.zipkin.service.name需要配置在这个文件中,否则某些工程启动时logback获取不到它的值,会把日志写到 spring.application.name_IS_UNDEFINED目录和下面的日志文件中

1
2
3
4
5
6
# bootstrap.yml
spring:
  application:
    name: xxxxx
logging:
  config: classpath:logback-spring.xml

日志配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
# logback-spring.xml
# 重要环境变量配置
<springProperty scope="context" name="spring.zipkin.service.name" source="spring.zipkin.service.name"/>
<springProperty scope="context" name="spring.application.name" source="spring.application.name"/>
<property name="APP_NAME" value="${spring.zipkin.service.name:-${spring.application.name:-}}"/>
#
<contextName>${APP_NAME}</contextName>
# 定义本地IP地址
<conversionRule conversionWord="localip" converterClass="com.xxxxxxx.log.converter.HostIpConverter"/>
# 定义消息格式模板
<property name="LOG_TRACK_PATTERN"
              value="${APP_NAME:-},%X{X-B3-TraceId:-},%X{X-B3-SpanId:-},%X{X-Span-Export:-}"/>
<property name="CONSOLE_LOG_PATTERN"
              value="${CONSOLE_LOG_PATTERN:-%d{yyyy-MM-dd HH:mm:ss.SSS} %-4level [${LOG_TRACK_PATTERN:-,,,}] %localip ${PID:- } [%thread] [%logger{36}] - %msg%n}"/>
<property name="FILE_LOG_PATTERN" value="${FILE_LOG_PATTERN:-${CONSOLE_LOG_PATTERN:-%msg%n}}"/>
 # 使用模板
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <appender name="INFO_APPENDER" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <encoder>
            <pattern>${FILE_LOG_PATTERN:-%msg%n}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <FileNamePattern>${LOG_HOME}/${APP_NAME}.info.log.%d{yyyy-MM-dd}</FileNamePattern>
            <MaxHistory>${LOG_KEEP_TIME}</MaxHistory>
        </rollingPolicy>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>
    <appender name="API_APPENDER" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${API_LOG_HOME}/${APP_NAME}.json</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${API_LOG_HOME}/${APP_NAME}.json.%d{yyyy-MM-dd}.gz</fileNamePattern>
            <maxHistory>7</maxHistory>
        </rollingPolicy>
        <filter class="ch.qos.logback.core.filter.EvaluatorFilter">
            <evaluator class="ch.qos.logback.classic.boolex.OnMarkerEvaluator">
                <marker>aspectLog</marker>
            </evaluator>
            <onMismatch>DENY</onMismatch>
            <onMatch>NEUTRAL</onMatch>
        </filter>
        <!-- 日志输出编码 -->
        <encoder>
            <pattern>%msg%n</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>
    <appender name="BIG_DATA_APPENDER" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <encoder>
            <pattern>%msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <FileNamePattern>${BIG_DATA_LOG_HOME}/${APP_NAME}.bigdata.log.%d{yyyyMMddHH}</FileNamePattern>
            <MaxHistory>${LOG_KEEP_TIME}</MaxHistory>
        </rollingPolicy>
    </appender>

工具类

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
import ch.qos.logback.classic.pattern.MessageConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;
 
public class HostIpConverter extends MessageConverter {
 
    @Override
    public String convert(ILoggingEvent event) {
        return IpFetcher.LOCAL_HOST_ADDRESS;
    }
 
    private static class IpFetcher {
        private static final String LOCAL_HOST_ADDRESS;
 
        static {
            LOCAL_HOST_ADDRESS = NetworkUtils.getLocalHostAddress();
        }
    }
}
 
/**
 * Resolves this machine's IPv4 address by scanning the network interfaces.
 *
 * <p>Fix over the previous version: the first IPv4 address of the first
 * enumerated interface was returned unconditionally, so depending on
 * enumeration order this could yield a loopback (127.x.x.x), link-local
 * (169.254.x.x), virtual, or down interface's address. Those are now
 * skipped. Also guards against {@code getNetworkInterfaces()} returning
 * {@code null} (permitted by its contract when no interfaces exist).
 *
 * @return the first usable IPv4 address; falls back to the OS-resolved host
 *         address, and finally to {@code "127.0.0.1"} so callers always
 *         receive a syntactically valid IP string
 */
public static String getLocalHostAddress() {
    try {
        Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
        while (interfaces != null && interfaces.hasMoreElements()) {
            NetworkInterface networkInterface = interfaces.nextElement();
            // Skip interfaces that cannot carry the host's real address:
            // not up, loopback, or virtual sub-interfaces.
            if (!networkInterface.isUp()
                    || networkInterface.isLoopback()
                    || networkInterface.isVirtual()) {
                continue;
            }
            Enumeration<InetAddress> addresses = networkInterface.getInetAddresses();
            while (addresses.hasMoreElements()) {
                InetAddress address = addresses.nextElement();
                // Accept only routable IPv4 addresses; exclude loopback and
                // link-local, which are never the address peers reach us on.
                if (address instanceof Inet4Address
                        && !address.isLoopbackAddress()
                        && !address.isLinkLocalAddress()) {
                    return address.getHostAddress();
                }
            }
        }
    } catch (SocketException e) {
        // Interface enumeration failed — fall through to the resolver below.
    }
    // Fallback: ask the OS resolver for the locally configured host address.
    try {
        return InetAddress.getLocalHost().getHostAddress();
    } catch (UnknownHostException e) {
        // Last resort so callers always get a valid (if useless) address.
        return "127.0.0.1";
    }
}