Commit 4be701c1 authored by 宋新宇's avatar 宋新宇

修改

parent 9c42a095
package com.lwby.marketing.att.bystory.handle;
import com.alibaba.fastjson.JSONObject;
import com.lwby.marketing.att.bystory.CallBackType;
import com.lwby.marketing.att.bystory.DyStoryUniversalProcess;
import com.lwby.marketing.flow.NodeFlow;
import com.lwby.marketing.notify.Media;
......@@ -31,12 +33,12 @@ public class ParameterSetupStoryNovelFlow extends NodeFlow<StoryNovelAction> {
//VO对像
DeliveryDeviceInfo deliveryDeviceInfo = null;
//String s = "{\"creativeId\":\"112331\",\"creativeType\":\"3\",\"adid\":\"12321\","
// + "\"clickId\":\"12312143232\",\"channel\":\"216011231\",\"bookId\":\"4322111\","
// + "\"media\":\"jrtt\",\"clickTime\":123123123,\"code\":\"12ede3e231\"}";;
//StoryLogin storyLogin = JSONObject.parseObject(s,StoryLogin.class);
String s = "{\"creativeId\":\"112331\",\"creativeType\":\"3\",\"adid\":\"12321\","
+ "\"clickId\":\"12312143232\",\"channel\":\"216011231\",\"bookId\":\"4322111\","
+ "\"media\":\"jrtt\",\"clickTime\":123123123,\"code\":\"12ede3e231\"}";;
StoryLogin storyLogin = JSONObject.parseObject(s,StoryLogin.class);
//匹配OpenId
StoryLogin storyLogin = up.get(StoryLogin.class,assembleKey(openId));
//StoryLogin storyLogin = up.get(StoryLogin.class,assembleKey(openId));
action.setStoryLogin(storyLogin);
//NovelAction对像参数填充
......@@ -44,7 +46,9 @@ public class ParameterSetupStoryNovelFlow extends NodeFlow<StoryNovelAction> {
action.setUserId(action.getUserId());
action.setCurrentDateStr(DateTimUtils.getCurrentDateString());
if (CallBackType.active.getType().equals(0)) {
action.setChannelId(action.getClientInfo().getChannel());
}
//非商店吊起参数设置
......
......@@ -32,7 +32,7 @@ public class AttrController {
private KafkaTemplate<String, String> kafkaTemplate;
@Resource
DyStoryFlowExecutor execute;
DyStoryFlowExecutor dyStoryFlowExecutor;
@RequestMapping("/testUpload")
public void testUpload() {
......@@ -49,7 +49,7 @@ public class AttrController {
event.getProductId());
try {
execute.getExecutorByStory().execute(action);
dyStoryFlowExecutor.getExecutorByStory().execute(action);
} catch (Exception e) {
throw new RuntimeException(e);
}
......
......@@ -35,3 +35,7 @@ jetcache:
type: linkedhashmap
keyConvertor: fastjson
limit: 10000
# Directory for application log files; read by the logback config as
# ${logging.file.path} (falls back to ./logs there when unset).
logging:
  file:
    path: /data/marketing/logs
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<!-- Shared log line layout; %X{traceId} pulls the trace id from the MDC for request correlation. -->
<timestamp key="DATETIME" datePattern="yyyy-MM-dd HH:mm:ss" />
<property name="FILE_LOG_PATTERN"
value="[traceId=%X{traceId}] %d{yyyy-MM-dd HH:mm:ss.SSS} -%5p %t:%c{20}:%L - %m%n"/>
<!-- Main rolling file appender (all levels); path defaults to ./logs when logging.file.path is unset. -->
<appender name="logFile"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<File>${logging.file.path:-logs}/common-service.log</File>
<!-- Roll by day into dated history directories and by size at 1000MB; keep 30 days. -->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${logging.file.path:-logs}/history/%d{yyyy-MM-dd,aux}/common-service.%d-%i.log</fileNamePattern>
<maxHistory>30</maxHistory>
<maxFileSize>1000MB</maxFileSize>
</rollingPolicy>
<encoder>
<charset>UTF-8</charset>
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
</appender>
<!-- Error-only rolling file appender; the LevelFilter below accepts ERROR and denies everything else. -->
<appender name="errorAppender"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<File>${logging.file.path:-logs}/common-service-error.log</File>
<!-- Same rotation scheme as the main log: daily directories, 1000MB size cap, 30 days of history. -->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${logging.file.path:-logs}/history/%d{yyyy-MM-dd,aux}/common-service-error.%d-%i.log</fileNamePattern>
<maxHistory>30</maxHistory>
<maxFileSize>1000MB</maxFileSize>
</rollingPolicy>
<encoder>
<charset>UTF-8</charset>
<pattern>${FILE_LOG_PATTERN}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Async wrapper around logFile so file I/O happens off the application threads. -->
<appender name="async_log_file" class="ch.qos.logback.classic.AsyncAppender">
<!-- 0 = never discard events; by default, once the queue is 80% full, TRACE/DEBUG/INFO events are dropped. -->
<discardingThreshold>0</discardingThreshold>
<!-- Queue depth; larger values trade memory for throughput. Logback default is 256. -->
<queueSize>5000</queueSize>
<!-- Max time (ms) to flush the queue on shutdown/redeploy; events still queued after this are dropped (default 1s). -->
<maxFlushTime>3000</maxFlushTime>
<!-- Capture caller data so class/line location (%c/%L in the pattern) is available. -->
<includeCallerData>true</includeCallerData>
<!-- Delegate appender; an AsyncAppender supports exactly one appender-ref. -->
<appender-ref ref="logFile"/>
</appender>
<!-- Async wrapper around errorAppender; mirrors async_log_file's tuning. -->
<appender name="async_error_file" class="ch.qos.logback.classic.AsyncAppender">
<!-- 0 = never discard events; by default, once the queue is 80% full, TRACE/DEBUG/INFO events are dropped. -->
<discardingThreshold>0</discardingThreshold>
<!-- Queue depth; larger values trade memory for throughput. Logback default is 256. -->
<queueSize>5000</queueSize>
<!-- Max time (ms) to flush the queue on shutdown/redeploy; events still queued after this are dropped (default 1s). -->
<maxFlushTime>3000</maxFlushTime>
<!-- Capture caller data so class/line location (%c/%L in the pattern) is available. -->
<includeCallerData>true</includeCallerData>
<!-- Delegate appender; an AsyncAppender supports exactly one appender-ref. -->
<appender-ref ref="errorAppender"/>
</appender>
<!-- 控制台打印 -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
......@@ -10,5 +77,7 @@
<!-- Root logger: INFO and above goes to the console and both async file appenders. -->
<root level="INFO">
<appender-ref ref="STDOUT" />
<appender-ref ref="async_log_file"/>
<appender-ref ref="async_error_file"/>
</root>
</configuration>
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment