comment (stringlengths 16-8.84k) | method_body (stringlengths 37-239k) | target_code (stringlengths 0-242) | method_body_after (stringlengths 29-239k) | context_before (stringlengths 14-424k) | context_after (stringlengths 14-284k)
---|---|---|---|---|---|
Boolean variable names don't need a preposition like whether or is; the name only needs to describe what the check stands for. For example, if this variable were renamed to updateScenario, wouldn't if (!updateScenario) read more fluently than if (whetherUpdateScenario)?
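A minimal before/after sketch of the suggested rename (updateScenario is the reviewer's proposed name, not code from this repository; the call site is taken from the method below):

// Before: the "whether" prefix adds a preposition without adding meaning.
boolean whetherUpdateScenario = updateUiScenario(requestResults, dto, errorSize, status, report, scenario);
if (!whetherUpdateScenario) return report;

// After: the name states what the flag represents, and the negation reads naturally.
boolean updateScenario = updateUiScenario(requestResults, dto, errorSize, status, report, scenario);
if (!updateScenario) return report;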
|
public ApiScenarioReport updateUiScenario(List<ApiScenarioReportResult> requestResults, ResultDTO dto) {
long errorSize = requestResults.stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Error.name())).count();
String status = getStatus(dto);
ApiScenarioReport report = editReport(dto.getReportType(), dto.getReportId(), status, dto.getRunMode());
UiScenarioWithBLOBs scenario = uiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (scenario == null) {
scenario = uiScenarioMapper.selectByPrimaryKey(report.getScenarioId());
}
if (scenario != null) {
boolean whetherUpdateScenario = updateUiScenario(requestResults, dto, errorSize, status, report, scenario);
if (!whetherUpdateScenario) return report;
}
TestPlanUiScenario testPlanUiScenario = testPlanUiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (testPlanUiScenario != null) {
report.setScenarioId(testPlanUiScenario.getUiScenarioId());
report.setEndTime(System.currentTimeMillis());
testPlanUiScenario.setLastResult(status);
long successSize = requestResults.stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
String passRate = new DecimalFormat("0%").format((float) successSize / requestResults.size());
testPlanUiScenario.setPassRate(passRate);
testPlanUiScenario.setReportId(report.getId());
report.setEndTime(System.currentTimeMillis());
testPlanUiScenario.setUpdateTime(System.currentTimeMillis());
testPlanUiScenarioMapper.updateByPrimaryKeySelective(testPlanUiScenario);
if (scenario == null) {
scenario = uiScenarioMapper.selectByPrimaryKey(testPlanUiScenario.getUiScenarioId());
}
updateUiScenario(requestResults, dto, errorSize, status, report, scenario);
}
return report;
}
|
if (!whetherUpdateScenario) return report;
|
public ApiScenarioReport updateUiScenario(List<ApiScenarioReportResult> requestResults, ResultDTO dto) {
long errorSize = requestResults.stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Error.name())).count();
String status = getStatus(dto);
ApiScenarioReport report = editReport(dto.getReportType(), dto.getReportId(), status, dto.getRunMode());
UiScenarioWithBLOBs scenario = uiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (scenario == null) {
scenario = uiScenarioMapper.selectByPrimaryKey(report.getScenarioId());
}
if (scenario != null) {
boolean whetherUpdateScenario = updateUiScenario(requestResults, dto, errorSize, status, report, scenario);
if (!whetherUpdateScenario) return report;
}
TestPlanUiScenario testPlanUiScenario = testPlanUiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (testPlanUiScenario != null) {
report.setScenarioId(testPlanUiScenario.getUiScenarioId());
report.setEndTime(System.currentTimeMillis());
testPlanUiScenario.setLastResult(status);
long successSize = requestResults.stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
String passRate = new DecimalFormat("0%").format((float) successSize / requestResults.size());
testPlanUiScenario.setPassRate(passRate);
testPlanUiScenario.setReportId(report.getId());
report.setEndTime(System.currentTimeMillis());
testPlanUiScenario.setUpdateTime(System.currentTimeMillis());
testPlanUiScenarioMapper.updateByPrimaryKeySelective(testPlanUiScenario);
if (scenario == null) {
scenario = uiScenarioMapper.selectByPrimaryKey(testPlanUiScenario.getUiScenarioId());
}
updateUiScenario(requestResults, dto, errorSize, status, report, scenario);
}
return report;
}
|
class ApiScenarioReportService {
@Resource
private ExtApiScenarioReportMapper extApiScenarioReportMapper;
@Resource
private ApiScenarioReportMapper apiScenarioReportMapper;
@Resource
private ApiScenarioReportDetailMapper apiScenarioReportDetailMapper;
@Resource
private ApiScenarioReportResultMapper apiScenarioReportResultMapper;
@Resource
private ApiScenarioReportResultService apiScenarioReportResultService;
@Resource
private ApiScenarioMapper apiScenarioMapper;
@Resource
private UiScenarioMapper uiScenarioMapper;
@Resource
private TestPlanApiScenarioMapper testPlanApiScenarioMapper;
@Resource
private NoticeSendService noticeSendService;
@Resource
private UserService userService;
@Resource
private ProjectMapper projectMapper;
@Resource
private EnvironmentGroupMapper environmentGroupMapper;
@Resource
private ApiTestEnvironmentMapper apiTestEnvironmentMapper;
@Resource
private ApiScenarioReportStructureService apiScenarioReportStructureService;
@Resource
private ApiScenarioReportStructureMapper apiScenarioReportStructureMapper;
@Resource
private ApiDefinitionExecResultMapper definitionExecResultMapper;
@Resource
private ExtApiDefinitionExecResultMapper extApiDefinitionExecResultMapper;
@Resource
private UiReportServiceProxy uiReportServiceProxy;
@Resource
private ExtApiScenarioReportResultMapper extApiScenarioReportResultMapper;
@Resource
private ScenarioExecutionInfoService scenarioExecutionInfoService;
@Resource
private TestPlanUiScenarioMapper testPlanUiScenarioMapper;
public void saveResult(ResultDTO dto) {
apiScenarioReportResultService.save(dto.getReportId(), dto.getRequestResults());
}
public void batchSaveResult(List<ResultDTO> dtos) {
apiScenarioReportResultService.batchSave(dtos);
}
public void saveUiResult(List<RequestResult> requestResults, ResultDTO dto) {
uiReportServiceProxy.saveUiResult(dto.getReportId(), requestResults);
}
public ApiScenarioReport testEnded(ResultDTO dto) {
if (!StringUtils.equals(dto.getReportType(), RunModeConstants.SET_REPORT.toString())) {
apiScenarioReportStructureService.update(dto.getReportId(), dto.getConsole());
}
ApiScenarioReport scenarioReport;
if (StringUtils.equals(dto.getRunMode(), ApiRunMode.SCENARIO_PLAN.name())) {
scenarioReport = updatePlanCase(dto);
} else if (StringUtils.equalsAny(dto.getRunMode(), ApiRunMode.SCHEDULE_SCENARIO_PLAN.name(), ApiRunMode.JENKINS_SCENARIO_PLAN.name())) {
scenarioReport = updateSchedulePlanCase(dto);
} else if (dto.getRunMode().startsWith("UI")) {
ApiScenarioReportResultExample example = new ApiScenarioReportResultExample();
example.createCriteria().andReportIdEqualTo(dto.getReportId());
scenarioReport = updateUiScenario(apiScenarioReportResultMapper.selectByExample(example), dto);
} else {
scenarioReport = updateScenario(dto);
}
return scenarioReport;
}
public APIScenarioReportResult get(String reportId, boolean selectReportContent) {
APIScenarioReportResult reportResult = extApiScenarioReportMapper.get(reportId);
if (reportResult != null) {
if (reportResult.getReportVersion() != null && reportResult.getReportVersion() > 1) {
reportResult.setContent(JSON.toJSONString(apiScenarioReportStructureService.assembleReport(reportId, selectReportContent)));
} else {
ApiScenarioReportDetail detail = apiScenarioReportDetailMapper.selectByPrimaryKey(reportId);
if (detail != null && reportResult != null) {
reportResult.setContent(new String(detail.getContent(), StandardCharsets.UTF_8));
}
}
return reportResult;
}
APIScenarioReportResult result = this.getApiIntegrated(reportId);
return result;
}
/**
* CASE integrated report
*
* @param reportId
* @return
*/
public APIScenarioReportResult getApiIntegrated(String reportId) {
ApiDefinitionExecResultWithBLOBs result = definitionExecResultMapper.selectByPrimaryKey(reportId);
if (result != null) {
APIScenarioReportResult reportResult = new APIScenarioReportResult();
BeanUtils.copyBean(reportResult, result);
reportResult.setReportVersion(2);
reportResult.setTestId(reportId);
ApiScenarioReportDTO dto = apiScenarioReportStructureService.apiIntegratedReport(reportId);
apiScenarioReportStructureService.initProjectEnvironmentByEnvConfig(dto, result.getEnvConfig());
reportResult.setContent(JSON.toJSONString(dto));
return reportResult;
}
return null;
}
public List<APIScenarioReportResult> list(QueryAPIReportRequest request) {
request = this.initRequest(request);
request.setOrders(ServiceUtils.getDefaultOrder(request.getOrders()));
List<APIScenarioReportResult> list = extApiScenarioReportMapper.list(request);
List<String> userIds = list.stream().map(APIScenarioReportResult::getUserId)
.collect(Collectors.toList());
Map<String, User> userMap = ServiceUtils.getUserMap(userIds);
list.forEach(item -> {
User user = userMap.get(item.getUserId());
if (user != null)
item.setUserName(user.getName());
});
return list;
}
public QueryAPIReportRequest initRequest(QueryAPIReportRequest request) {
if (request != null) {
if (MapUtils.isNotEmpty(request.getFilters()) && request.getFilters().containsKey("trigger_mode")
&& CollectionUtils.isNotEmpty(request.getFilters().get("trigger_mode"))
&& request.getFilters().get("trigger_mode").contains("API") && !request.getFilters().get("trigger_mode").contains(ReportTriggerMode.JENKINS_RUN_TEST_PLAN.name())) {
request.getFilters().get("trigger_mode").add(ReportTriggerMode.JENKINS_RUN_TEST_PLAN.name());
}
}
return request;
}
public List<String> idList(QueryAPIReportRequest request) {
request = this.initRequest(request);
request.setOrders(ServiceUtils.getDefaultOrder(request.getOrders()));
return extApiScenarioReportMapper.idList(request);
}
private void checkNameExist(APIScenarioReportResult request) {
ApiScenarioReportExample example = new ApiScenarioReportExample();
example.createCriteria().andNameEqualTo(request.getName()).andProjectIdEqualTo(request.getProjectId()).andExecuteTypeEqualTo(ExecuteType.Saved.name()).andIdNotEqualTo(request.getId());
if (apiScenarioReportMapper.countByExample(example) > 0) {
MSException.throwException(Translator.get("load_test_already_exists"));
}
}
public APIScenarioReportResult init(String scenarioIds, String reportName, String status, String scenarioNames, String triggerMode, String projectId, String userID) {
APIScenarioReportResult report = new APIScenarioReportResult();
if (triggerMode.equals(ApiRunMode.SCENARIO.name()) || triggerMode.equals(ApiRunMode.DEFINITION.name())) {
triggerMode = ReportTriggerMode.MANUAL.name();
}
report.setId(UUID.randomUUID().toString());
report.setName(reportName);
report.setCreateTime(System.currentTimeMillis());
report.setUpdateTime(System.currentTimeMillis());
report.setStatus(status);
if (StringUtils.isNotEmpty(userID)) {
report.setUserId(userID);
} else {
report.setUserId(SessionUtils.getUserId());
}
report.setTriggerMode(triggerMode);
report.setExecuteType(ExecuteType.Saved.name());
report.setProjectId(projectId);
report.setScenarioName(scenarioNames);
report.setScenarioId(scenarioIds);
if (StringUtils.isNotEmpty(report.getTriggerMode()) && report.getTriggerMode().equals("CASE")) {
report.setTriggerMode(TriggerMode.MANUAL.name());
}
apiScenarioReportMapper.insert(report);
return report;
}
public ApiScenarioReportWithBLOBs editReport(String reportType, String reportId, String status, String runMode) {
ApiScenarioReportWithBLOBs report = apiScenarioReportMapper.selectByPrimaryKey(reportId);
if (report == null) {
report = new ApiScenarioReportWithBLOBs();
report.setId(reportId);
}
if (StringUtils.equals(reportType, RunModeConstants.SET_REPORT.toString())) {
return report;
}
if (StringUtils.equals(runMode, "CASE")) {
report.setTriggerMode(TriggerMode.MANUAL.name());
}
report.setStatus(status);
report.setName(report.getScenarioName() + "-" + DateUtils.getTimeStr(System.currentTimeMillis()));
report.setEndTime(System.currentTimeMillis());
report.setUpdateTime(System.currentTimeMillis());
if (StringUtils.isNotEmpty(report.getTriggerMode()) && report.getTriggerMode().equals("CASE")) {
report.setTriggerMode(TriggerMode.MANUAL.name());
}
if (report.getExecuteType().equals(ExecuteType.Debug.name()) &&
report.getReportType().equals(ReportTypeConstants.UI_INDEPENDENT.name())) {
return report;
}
apiScenarioReportMapper.updateByPrimaryKeySelective(report);
return report;
}
public ApiScenarioReport updateReport(APIScenarioReportResult test) {
checkNameExist(test);
ApiScenarioReportWithBLOBs report = new ApiScenarioReportWithBLOBs();
report.setId(test.getId());
report.setProjectId(test.getProjectId());
report.setName(test.getName());
report.setScenarioName(test.getScenarioName());
report.setScenarioId(test.getScenarioId());
report.setTriggerMode(test.getTriggerMode());
report.setDescription(test.getDescription());
report.setEndTime(System.currentTimeMillis());
report.setUpdateTime(System.currentTimeMillis());
report.setStatus(test.getStatus());
report.setUserId(test.getUserId());
report.setExecuteType(test.getExecuteType());
if (StringUtils.isNotEmpty(report.getTriggerMode()) && report.getTriggerMode().equals("CASE")) {
report.setTriggerMode(TriggerMode.MANUAL.name());
}
apiScenarioReportMapper.updateByPrimaryKeySelective(report);
return report;
}
public ApiScenarioReport updatePlanCase(ResultDTO dto) {
String status = getStatus(dto);
ApiScenarioReport report = editReport(dto.getReportType(), dto.getReportId(), status, dto.getRunMode());
TestPlanApiScenario testPlanApiScenario = testPlanApiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (testPlanApiScenario != null) {
if (report != null) {
testPlanApiScenario.setLastResult(report.getStatus());
} else {
testPlanApiScenario.setLastResult(status);
}
long successSize = dto.getRequestResults().stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
String passRate = new DecimalFormat("0%").format((float) successSize / dto.getRequestResults().size());
testPlanApiScenario.setPassRate(passRate);
testPlanApiScenario.setReportId(dto.getReportId());
testPlanApiScenario.setUpdateTime(System.currentTimeMillis());
testPlanApiScenarioMapper.updateByPrimaryKeySelective(testPlanApiScenario);
scenarioExecutionInfoService.insertExecutionInfo(testPlanApiScenario.getId(), status, report.getTriggerMode());
ApiScenario scenario = apiScenarioMapper.selectByPrimaryKey(testPlanApiScenario.getApiScenarioId());
if (scenario != null) {
scenario.setLastResult(StringUtils.endsWithIgnoreCase(status, ScenarioStatus.Error.name())
? ScenarioStatus.Fail.name() : status);
scenario.setPassRate(passRate);
scenario.setReportId(dto.getReportId());
int executeTimes = 0;
if (scenario.getExecuteTimes() != null) {
executeTimes = scenario.getExecuteTimes().intValue();
}
scenario.setExecuteTimes(executeTimes + 1);
apiScenarioMapper.updateByPrimaryKey(scenario);
}
}
return report;
}
public ApiScenarioReport updateSchedulePlanCase(ResultDTO dto) {
List<String> testPlanReportIdList = new ArrayList<>();
StringBuilder scenarioNames = new StringBuilder();
String status = getStatus(dto);
ApiScenarioReportWithBLOBs report = editReport(dto.getReportType(), dto.getReportId(), status, dto.getRunMode());
if (report != null) {
if (StringUtils.isNotEmpty(dto.getTestPlanReportId()) && !testPlanReportIdList.contains(dto.getTestPlanReportId())) {
testPlanReportIdList.add(dto.getTestPlanReportId());
}
TestPlanApiScenario testPlanApiScenario = testPlanApiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (testPlanApiScenario != null) {
report.setScenarioId(testPlanApiScenario.getApiScenarioId());
report.setEndTime(System.currentTimeMillis());
apiScenarioReportMapper.updateByPrimaryKeySelective(report);
testPlanApiScenario.setLastResult(report.getStatus());
long successSize = dto.getRequestResults().stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
String passRate = new DecimalFormat("0%").format((float) successSize / dto.getRequestResults().size());
testPlanApiScenario.setPassRate(passRate);
testPlanApiScenario.setReportId(report.getId());
report.setEndTime(System.currentTimeMillis());
testPlanApiScenario.setUpdateTime(System.currentTimeMillis());
testPlanApiScenarioMapper.updateByPrimaryKeySelective(testPlanApiScenario);
scenarioNames.append(report.getName()).append(",");
ApiScenario scenario = apiScenarioMapper.selectByPrimaryKey(testPlanApiScenario.getApiScenarioId());
if (scenario != null) {
scenario.setLastResult(StringUtils.endsWithIgnoreCase(status, ScenarioStatus.Error.name())
? ScenarioStatus.Fail.name() : status);
scenario.setPassRate(passRate);
scenario.setReportId(report.getId());
int executeTimes = 0;
if (scenario.getExecuteTimes() != null) {
executeTimes = scenario.getExecuteTimes().intValue();
}
scenario.setExecuteTimes(executeTimes + 1);
apiScenarioMapper.updateByPrimaryKey(scenario);
}
}
}
return report;
}
private String getIntegrationReportStatus(List<String> reportStatus) {
boolean hasError = false, hasErrorReport = false, hasUnExecute = false, hasOtherStatus = false, hasStop = false;
if (CollectionUtils.isEmpty(reportStatus)) {
hasUnExecute = true;
} else {
for (String status : reportStatus) {
if (StringUtils.equalsIgnoreCase(status, ExecuteResult.SCENARIO_ERROR.toString())) {
hasError = true;
} else if (StringUtils.equalsIgnoreCase(status, ExecuteResult.ERROR_REPORT_RESULT.toString())) {
hasErrorReport = true;
} else if (StringUtils.equalsIgnoreCase(status, ExecuteResult.STOP.toString())) {
hasStop = true;
} else if (StringUtils.equalsIgnoreCase(status, ExecuteResult.UN_EXECUTE.toString())) {
hasUnExecute = true;
} else {
hasOtherStatus = true;
}
}
if (hasError || hasErrorReport || hasOtherStatus) {
hasUnExecute = false;
hasStop = false;
}
}
return hasError ? ScenarioStatus.Error.name() :
hasErrorReport ? ExecuteResult.ERROR_REPORT_RESULT.toString() :
hasStop ? ExecuteResult.STOP.toString() :
hasUnExecute ? ExecuteResult.UN_EXECUTE.toString() : ScenarioStatus.Success.name();
}
public void margeReport(String reportId, String runMode, String console) {
if (StringUtils.equalsIgnoreCase(runMode, ApiRunMode.DEFINITION.name())) {
ApiDefinitionExecResultWithBLOBs result = definitionExecResultMapper.selectByPrimaryKey(reportId);
if (!StringUtils.equalsAnyIgnoreCase(result.getStatus(), APITestStatus.Rerunning.name())) {
result.setEndTime(System.currentTimeMillis());
}
List<String> statusList = extApiDefinitionExecResultMapper.selectDistinctStatusByReportId(reportId);
result.setStatus(this.getIntegrationReportStatus(statusList));
definitionExecResultMapper.updateByPrimaryKeySelective(result);
} else {
ApiScenarioReport report = apiScenarioReportMapper.selectByPrimaryKey(reportId);
if (report != null) {
if (!StringUtils.equalsAnyIgnoreCase(report.getStatus(), APITestStatus.Rerunning.name())) {
report.setEndTime(System.currentTimeMillis());
}
List<String> statusList = extApiScenarioReportResultMapper.selectDistinctStatusByReportId(reportId);
report.setStatus(this.getIntegrationReportStatus(statusList));
apiScenarioReportMapper.updateByPrimaryKey(report);
scenarioExecutionInfoService.insertExecutionInfoByScenarioIds(report.getScenarioId(), report.getStatus(), report.getTriggerMode());
}
}
console = StringUtils.isNotEmpty(console) ? console : FixedCapacityUtils.getJmeterLogger(reportId, true);
if (StringUtils.isNotEmpty(console)) {
apiScenarioReportStructureService.update(reportId, console);
}
if (FixedCapacityUtils.jmeterLogTask.containsKey(reportId)) {
FixedCapacityUtils.jmeterLogTask.remove(reportId);
}
}
public ApiScenarioReport updateScenario(ResultDTO dto) {
String status = getStatus(dto);
ApiScenarioReport report = editReport(dto.getReportType(), dto.getReportId(), status, dto.getRunMode());
ApiScenarioWithBLOBs scenario = apiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (scenario == null) {
scenario = apiScenarioMapper.selectByPrimaryKey(report.getScenarioId());
}
if (scenario != null) {
scenario.setLastResult(StringUtils.endsWithIgnoreCase(status, ScenarioStatus.Error.name())
? ScenarioStatus.Fail.name() : status);
long successSize = dto.getRequestResults().stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
if (dto.getRequestResults().size() == 0) {
scenario.setPassRate("0%");
} else {
scenario.setPassRate(new DecimalFormat("0%").format((float) successSize / dto.getRequestResults().size()));
}
scenario.setReportId(dto.getReportId());
int executeTimes = 0;
if (scenario.getExecuteTimes() != null) {
executeTimes = scenario.getExecuteTimes().intValue();
}
scenario.setExecuteTimes(executeTimes + 1);
apiScenarioMapper.updateByPrimaryKey(scenario);
}
if (scenario != null && report != null) {
sendNotice(scenario, report);
}
return report;
}
private boolean updateUiScenario(List<ApiScenarioReportResult> requestResults, ResultDTO dto, long errorSize, String status, ApiScenarioReport report, UiScenarioWithBLOBs scenario) {
if (StringUtils.equalsAnyIgnoreCase(status, ExecuteResult.ERROR_REPORT_RESULT.toString())) {
scenario.setLastResult(status);
} else {
scenario.setLastResult(errorSize > 0 ? "Fail" : ScenarioStatus.Success.name());
}
long successSize = requestResults.stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
scenario.setPassRate(new DecimalFormat("0%").format((float) successSize / requestResults.size()));
scenario.setReportId(dto.getReportId());
int executeTimes = 0;
if (scenario.getExecuteTimes() != null) {
executeTimes = scenario.getExecuteTimes().intValue();
}
scenario.setExecuteTimes(executeTimes + 1);
if (report.getExecuteType().equals(ExecuteType.Debug.name()) &&
report.getReportType().equals(ReportTypeConstants.UI_INDEPENDENT.name())) {
return false;
}
uiScenarioMapper.updateByPrimaryKey(scenario);
return true;
}
public String getEnvironment(ApiScenarioWithBLOBs apiScenario) {
String environment = "未配置";
String environmentType = apiScenario.getEnvironmentType();
if (StringUtils.equals(environmentType, EnvironmentType.JSON.name()) && StringUtils.isNotEmpty(apiScenario.getEnvironmentJson())) {
String environmentJson = apiScenario.getEnvironmentJson();
JSONObject jsonObject = JSON.parseObject(environmentJson, Feature.DisableSpecialKeyDetect);
ApiTestEnvironmentExample example = new ApiTestEnvironmentExample();
List<String> collect = jsonObject.values().stream().map(Object::toString).collect(Collectors.toList());
collect.add("-1");
example.createCriteria().andIdIn(collect);
List<ApiTestEnvironment> envs = apiTestEnvironmentMapper.selectByExample(example);
String env = envs.stream().map(ApiTestEnvironment::getName).collect(Collectors.joining(","));
if (StringUtils.isNotBlank(env)) {
environment = env;
}
}
if (StringUtils.equals(environmentType, EnvironmentType.GROUP.name())) {
String environmentGroupId = apiScenario.getEnvironmentGroupId();
EnvironmentGroup environmentGroup = environmentGroupMapper.selectByPrimaryKey(environmentGroupId);
if (environmentGroup != null) {
environment = environmentGroup.getName();
}
}
return environment;
}
private void sendNotice(ApiScenarioWithBLOBs scenario, ApiScenarioReport result) {
BeanMap beanMap = new BeanMap(scenario);
String event;
String status;
if (StringUtils.equals(scenario.getLastResult(), ScenarioStatus.Success.name())) {
event = NoticeConstants.Event.EXECUTE_SUCCESSFUL;
status = "成功";
} else {
event = NoticeConstants.Event.EXECUTE_FAILED;
status = "失败";
}
String userId = result.getCreateUser();
UserDTO userDTO = userService.getUserDTO(userId);
SystemParameterService systemParameterService = CommonBeanFactory.getBean(SystemParameterService.class);
assert systemParameterService != null;
Map paramMap = new HashMap<>(beanMap);
paramMap.put("operator", userDTO.getName());
paramMap.put("status", scenario.getLastResult());
paramMap.put("environment", getEnvironment(scenario));
BaseSystemConfigDTO baseSystemConfigDTO = systemParameterService.getBaseInfo();
String reportUrl = baseSystemConfigDTO.getUrl() + "/
paramMap.put("reportUrl", reportUrl);
String context = "${operator}执行接口自动化" + status + ": ${name}";
NoticeModel noticeModel = NoticeModel.builder()
.operator(userId)
.context(context)
.subject("接口自动化通知")
.paramMap(paramMap)
.event(event)
.build();
Project project = projectMapper.selectByPrimaryKey(scenario.getProjectId());
noticeSendService.send(project, NoticeConstants.TaskType.API_AUTOMATION_TASK, noticeModel);
}
public String update(APIScenarioReportResult test) {
ApiScenarioReport report = updateReport(test);
ApiScenarioReportDetail detail = apiScenarioReportDetailMapper.selectByPrimaryKey(test.getId());
if (detail == null) {
detail = new ApiScenarioReportDetail();
detail.setContent(test.getContent().getBytes(StandardCharsets.UTF_8));
detail.setReportId(report.getId());
detail.setProjectId(test.getProjectId());
apiScenarioReportDetailMapper.insert(detail);
} else {
detail.setContent(test.getContent().getBytes(StandardCharsets.UTF_8));
detail.setReportId(report.getId());
detail.setProjectId(test.getProjectId());
apiScenarioReportDetailMapper.updateByPrimaryKey(detail);
}
return report.getId();
}
public static List<String> getReportIds(String content) {
try {
return JSON.parseObject(content, List.class);
} catch (Exception e) {
return null;
}
}
public void delete(DeleteAPIReportRequest request) {
ApiScenarioReport report = apiScenarioReportMapper.selectByPrimaryKey(request.getId());
deleteScenarioReportResource(request.getId());
deleteApiDefinitionResult(request.getId());
if (report != null && StringUtils.isNotEmpty(report.getScenarioId())) {
List<String> list = getReportIds(report.getScenarioId());
if (CollectionUtils.isNotEmpty(list)) {
APIReportBatchRequest reportRequest = new APIReportBatchRequest();
reportRequest.setIsUi(request.getIsUi());
reportRequest.setIds(list);
this.deleteAPIReportBatch(reportRequest);
}
}
}
public void deleteScenarioReportResource(String id) {
apiScenarioReportMapper.deleteByPrimaryKey(id);
apiScenarioReportDetailMapper.deleteByPrimaryKey(id);
ApiScenarioReportResultExample example = new ApiScenarioReportResultExample();
example.createCriteria().andReportIdEqualTo(id);
apiScenarioReportResultMapper.deleteByExample(example);
ApiScenarioReportStructureExample structureExample = new ApiScenarioReportStructureExample();
structureExample.createCriteria().andReportIdEqualTo(id);
apiScenarioReportStructureMapper.deleteByExample(structureExample);
}
public void delete(String id) {
apiScenarioReportDetailMapper.deleteByPrimaryKey(id);
apiScenarioReportMapper.deleteByPrimaryKey(id);
ApiScenarioReportResultExample example = new ApiScenarioReportResultExample();
example.createCriteria().andReportIdEqualTo(id);
apiScenarioReportResultMapper.deleteByExample(example);
ApiScenarioReportStructureExample structureExample = new ApiScenarioReportStructureExample();
structureExample.createCriteria().andReportIdEqualTo(id);
apiScenarioReportStructureMapper.deleteByExample(structureExample);
ApiDefinitionExecResultExample definitionExecResultExample = new ApiDefinitionExecResultExample();
definitionExecResultExample.createCriteria().andIdEqualTo(id);
definitionExecResultMapper.deleteByExample(definitionExecResultExample);
ApiDefinitionExecResultExample execResultExample = new ApiDefinitionExecResultExample();
execResultExample.createCriteria().andIntegratedReportIdEqualTo(id);
definitionExecResultMapper.deleteByExample(execResultExample);
}
public void deleteByIds(List<String> ids) {
if (CollectionUtils.isNotEmpty(ids)) {
deleteScenarioReportByIds(ids);
deleteApiDefinitionResultByIds(ids);
}
}
public void deleteAPIReportBatch(APIReportBatchRequest reportRequest) {
List<String> ids = getIdsByDeleteBatchRequest(reportRequest);
ids = batchDeleteReportResource(reportRequest, ids, true);
if (!ids.isEmpty()) {
deleteScenarioReportByIds(ids);
deleteApiDefinitionResultByIds(ids);
}
}
public void deleteScenarioReportByIds(List<String> ids) {
ApiScenarioReportDetailExample detailExample = new ApiScenarioReportDetailExample();
detailExample.createCriteria().andReportIdIn(ids);
apiScenarioReportDetailMapper.deleteByExample(detailExample);
ApiScenarioReportExample apiTestReportExample = new ApiScenarioReportExample();
apiTestReportExample.createCriteria().andIdIn(ids);
apiScenarioReportMapper.deleteByExample(apiTestReportExample);
ApiScenarioReportResultExample reportResultExample = new ApiScenarioReportResultExample();
reportResultExample.createCriteria().andReportIdIn(ids);
apiScenarioReportResultMapper.deleteByExample(reportResultExample);
ApiScenarioReportStructureExample structureExample = new ApiScenarioReportStructureExample();
structureExample.createCriteria().andReportIdIn(ids);
apiScenarioReportStructureMapper.deleteByExample(structureExample);
}
private void deleteApiDefinitionResultByIds(List<String> ids) {
ApiDefinitionExecResultExample definitionExecResultExample = new ApiDefinitionExecResultExample();
definitionExecResultExample.createCriteria().andIdIn(ids);
definitionExecResultMapper.deleteByExample(definitionExecResultExample);
ApiDefinitionExecResultExample execResultExample = new ApiDefinitionExecResultExample();
execResultExample.createCriteria().andIntegratedReportIdIn(ids);
definitionExecResultMapper.deleteByExample(execResultExample);
}
private void deleteApiDefinitionResult(String id) {
ApiDefinitionExecResultExample definitionExecResultExample = new ApiDefinitionExecResultExample();
definitionExecResultExample.createCriteria().andIdEqualTo(id);
definitionExecResultMapper.deleteByExample(definitionExecResultExample);
ApiDefinitionExecResultExample execResultExample = new ApiDefinitionExecResultExample();
execResultExample.createCriteria().andIntegratedReportIdEqualTo(id);
definitionExecResultMapper.deleteByExample(execResultExample);
}
public List<String> getIdsByDeleteBatchRequest(APIReportBatchRequest reportRequest) {
List<String> ids = reportRequest.getIds();
if (reportRequest.isSelectAllDate()) {
ids = this.idList(reportRequest);
if (reportRequest.getUnSelectIds() != null) {
ids.removeAll(reportRequest.getUnSelectIds());
}
}
return ids;
}
public List<String> batchDeleteReportResource(APIReportBatchRequest reportRequest, List<String> ids, boolean deleteApiResult) {
List<String> myList = reportRequest.getIds().stream().distinct().collect(Collectors.toList());
reportRequest.setIds(myList);
int handleCount = 5000;
while (ids.size() > handleCount) {
List<String> handleIdList = new ArrayList<>(handleCount);
List<String> otherIdList = new ArrayList<>();
for (int index = 0; index < ids.size(); index++) {
if (index < handleCount) {
handleIdList.add(ids.get(index));
} else {
otherIdList.add(ids.get(index));
}
}
deleteScenarioReportByIds(handleIdList);
if (deleteApiResult) {
deleteApiDefinitionResultByIds(handleIdList);
}
ids = otherIdList;
}
return ids;
}
public long countByProjectIdAndCreateAndByScheduleInThisWeek(String projectId) {
Map<String, Date> startAndEndDateInWeek = DateUtils.getWeedFirstTimeAndLastTime(new Date());
Date firstTime = startAndEndDateInWeek.get("firstTime");
Date lastTime = startAndEndDateInWeek.get("lastTime");
if (firstTime == null || lastTime == null) {
return 0;
} else {
return extApiScenarioReportMapper.countByProjectIdAndCreateAndByScheduleInThisWeek(projectId, firstTime.getTime(), lastTime.getTime());
}
}
public long countByProjectIdAndCreateInThisWeek(String projectId) {
Map<String, Date> startAndEndDateInWeek = DateUtils.getWeedFirstTimeAndLastTime(new Date());
Date firstTime = startAndEndDateInWeek.get("firstTime");
Date lastTime = startAndEndDateInWeek.get("lastTime");
if (firstTime == null || lastTime == null) {
return 0;
} else {
return extApiScenarioReportMapper.countByProjectIdAndCreateInThisWeek(projectId, firstTime.getTime(), lastTime.getTime());
}
}
public List<ApiDataCountResult> countByProjectIdGroupByExecuteResult(String projectId) {
return extApiScenarioReportMapper.countByProjectIdGroupByExecuteResult(projectId);
}
public List<ApiScenarioReport> selectLastReportByIds(List<String> ids) {
if (!ids.isEmpty()) {
return extApiScenarioReportMapper.selectLastReportByIds(ids);
} else {
return new ArrayList<>(0);
}
}
public String getLogDetails(String id) {
ApiScenarioReport bloBs = apiScenarioReportMapper.selectByPrimaryKey(id);
if (bloBs != null) {
List<DetailColumn> columns = ReflexObjectUtil.getColumns(bloBs, ModuleReference.moduleColumns);
OperatingLogDetails details = new OperatingLogDetails(JSON.toJSONString(id), bloBs.getProjectId(), bloBs.getName(), bloBs.getCreateUser(), columns);
return JSON.toJSONString(details);
}
return null;
}
public String getLogDetails(List<String> ids) {
if (org.apache.commons.collections.CollectionUtils.isNotEmpty(ids)) {
ApiScenarioReportExample example = new ApiScenarioReportExample();
example.createCriteria().andIdIn(ids);
List<ApiScenarioReport> reportList = apiScenarioReportMapper.selectByExample(example);
List<String> names = reportList.stream().map(ApiScenarioReport::getName).collect(Collectors.toList());
OperatingLogDetails details = new OperatingLogDetails(JSON.toJSONString(ids), reportList.get(0).getProjectId(), String.join(",", names), reportList.get(0).getCreateUser(), new LinkedList<>());
return JSON.toJSONString(details);
}
return null;
}
public List<ApiScenarioReport> getByIds(List<String> ids) {
if (org.apache.commons.collections.CollectionUtils.isNotEmpty(ids)) {
ApiScenarioReportExample example = new ApiScenarioReportExample();
example.createCriteria().andIdIn(ids);
return apiScenarioReportMapper.selectByExample(example);
}
return null;
}
public List<ApiReportCountDTO> countByApiScenarioId() {
return extApiScenarioReportMapper.countByApiScenarioId();
}
public Map<String, String> getReportStatusByReportIds(Collection<String> values) {
if (CollectionUtils.isEmpty(values)) {
return new HashMap<>();
}
Map<String, String> map = new HashMap<>();
List<ApiScenarioReport> reportList = extApiScenarioReportMapper.selectStatusByIds(values);
for (ApiScenarioReport report : reportList) {
map.put(report.getId(), report.getStatus());
}
return map;
}
public APIScenarioReportResult init(String id, String scenarioId, String scenarioName, String triggerMode, String execType, String projectId, String userID, RunModeConfigDTO config) {
APIScenarioReportResult report = new APIScenarioReportResult();
if (triggerMode.equals(ApiRunMode.SCENARIO.name()) || triggerMode.equals(ApiRunMode.DEFINITION.name())) {
triggerMode = ReportTriggerMode.MANUAL.name();
}
report.setId(id);
report.setTestId(id);
if (StringUtils.isNotEmpty(scenarioName)) {
scenarioName = scenarioName.length() >= 3000 ? scenarioName.substring(0, 2000) : scenarioName;
report.setName(scenarioName);
} else {
report.setName("场景调试");
}
report.setUpdateTime(System.currentTimeMillis());
report.setCreateTime(System.currentTimeMillis());
String status = config != null && StringUtils.equals(config.getMode(), RunModeConstants.SERIAL.toString())
? APITestStatus.Waiting.name() : APITestStatus.Running.name();
report.setStatus(status);
if (StringUtils.isNotEmpty(userID)) {
report.setUserId(userID);
report.setCreateUser(userID);
} else {
report.setUserId(SessionUtils.getUserId());
report.setCreateUser(SessionUtils.getUserId());
}
if (config != null && StringUtils.isNotBlank(config.getResourcePoolId())) {
report.setActuator(config.getResourcePoolId());
} else {
report.setActuator("LOCAL");
}
report.setTriggerMode(triggerMode);
report.setReportVersion(2);
report.setExecuteType(execType);
report.setProjectId(projectId);
report.setScenarioName(scenarioName);
report.setScenarioId(scenarioId);
if (config != null) {
report.setEnvConfig(JSON.toJSONString(config));
}
report.setReportType(ReportTypeConstants.SCENARIO_INDEPENDENT.name());
return report;
}
/**
* Returns the correct report status
*
* @param dto the result returned by JMeter
* @return
*/
private String getStatus(ResultDTO dto) {
if (MapUtils.isNotEmpty(dto.getArbitraryData()) && dto.getArbitraryData().containsKey("REPORT_STATUS")) {
return String.valueOf(dto.getArbitraryData().get("REPORT_STATUS"));
}
long errorSize = dto.getRequestResults().stream().filter(requestResult ->
StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Error.name())).count();
long errorReportResultSize = dto.getRequestResults().stream().filter(requestResult ->
StringUtils.equalsIgnoreCase(requestResult.getStatus(), ExecuteResult.ERROR_REPORT_RESULT.toString())).count();
if (StringUtils.isNotEmpty(dto.getRunMode()) && dto.getRunMode().startsWith("UI")) {
try {
errorSize = dto.getRequestResults().stream().filter(requestResult ->
StringUtils.isNotEmpty(requestResult.getResponseResult().getHeaders())
&& JSONArray.parseArray(requestResult.getResponseResult().getHeaders()) .stream().filter(
r -> ((JSONObject) r).containsKey("success") && !((JSONObject) r).getBoolean("success")
).count() > 0)
.count();
} catch (Exception e) {
errorSize = 1;
}
}
String status = dto.getRequestResults().isEmpty() ? ExecuteResult.UN_EXECUTE.toString() : ScenarioStatus.Success.name();
if (errorSize > 0) {
status = ScenarioStatus.Error.name();
} else if (errorReportResultSize > 0) {
status = ExecuteResult.ERROR_REPORT_RESULT.toString();
}
if (dto != null && dto.getArbitraryData() != null && dto.getArbitraryData().containsKey("TIMEOUT") && (Boolean) dto.getArbitraryData().get("TIMEOUT")) {
LoggerUtil.info("资源 " + dto.getTestId() + " 执行超时", dto.getReportId());
status = ScenarioStatus.Timeout.name();
}
return status;
}
public List<PlanReportCaseDTO> selectForPlanReport(List<String> reportIds) {
return extApiScenarioReportMapper.selectForPlanReport(reportIds);
}
public void cleanUpReport(long time, String projectId) {
List<String> ids = extApiScenarioReportMapper.selectByProjectIdAndLessThanTime(projectId, time);
List<String> definitionExecIds = extApiDefinitionExecResultMapper.selectByProjectIdAndLessThanTime(projectId, time);
ids.addAll(definitionExecIds);
if (CollectionUtils.isNotEmpty(ids)) {
APIReportBatchRequest request = new APIReportBatchRequest();
request.setIds(ids);
request.setSelectAllDate(false);
deleteAPIReportBatch(request);
}
}
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void batchSave(Map<String, RunModeDataDTO> executeQueue, String serialReportId, String runMode, List<MsExecResponseDTO> responseDTOS) {
List<APIScenarioReportResult> list = new LinkedList<>();
if (StringUtils.isEmpty(serialReportId)) {
for (String reportId : executeQueue.keySet()) {
APIScenarioReportResult report = executeQueue.get(reportId).getReport();
list.add(report);
responseDTOS.add(new MsExecResponseDTO(executeQueue.get(reportId).getTestId(), reportId, runMode));
}
if (CollectionUtils.isNotEmpty(list)) {
extApiScenarioReportMapper.sqlInsert(list);
}
}
}
public void reName(ApiScenarioReport reportRequest) {
if (StringUtils.equalsAnyIgnoreCase(reportRequest.getReportType(), ReportTypeConstants.API_INDEPENDENT.name(), ReportTypeConstants.API_INTEGRATED.name())) {
ApiDefinitionExecResultWithBLOBs result = definitionExecResultMapper.selectByPrimaryKey(reportRequest.getId());
if (result != null) {
result.setName(reportRequest.getName());
definitionExecResultMapper.updateByPrimaryKeySelective(result);
}
} else {
ApiScenarioReport apiTestReport = apiScenarioReportMapper.selectByPrimaryKey(reportRequest.getId());
if (apiTestReport != null) {
apiTestReport.setName(reportRequest.getName());
apiScenarioReportMapper.updateByPrimaryKey(apiTestReport);
}
}
}
public RequestResult selectReportContent(String stepId) {
return apiScenarioReportStructureService.selectReportContent(stepId);
}
public void cleanUpUiReportImg(List<String> ids) {
try {
if (ids != null && CollectionUtils.isNotEmpty(ids)) {
for (String id : ids) {
if (FileUtil.deleteDir(new File(FileUtils.UI_IMAGE_DIR + "/" + id))) {
LogUtil.info("删除 UI 报告截图成功,报告 ID 为 :" + id);
}
}
}
} catch (Exception e) {
LogUtil.error(e.getMessage(), e);
MSException.throwException(e.getMessage());
}
}
}
|
class ApiScenarioReportService {
@Resource
private ExtApiScenarioReportMapper extApiScenarioReportMapper;
@Resource
private ApiScenarioReportMapper apiScenarioReportMapper;
@Resource
private ApiScenarioReportDetailMapper apiScenarioReportDetailMapper;
@Resource
private ApiScenarioReportResultMapper apiScenarioReportResultMapper;
@Resource
private ApiScenarioReportResultService apiScenarioReportResultService;
@Resource
private ApiScenarioMapper apiScenarioMapper;
@Resource
private UiScenarioMapper uiScenarioMapper;
@Resource
private TestPlanApiScenarioMapper testPlanApiScenarioMapper;
@Resource
private NoticeSendService noticeSendService;
@Resource
private UserService userService;
@Resource
private ProjectMapper projectMapper;
@Resource
private EnvironmentGroupMapper environmentGroupMapper;
@Resource
private ApiTestEnvironmentMapper apiTestEnvironmentMapper;
@Resource
private ApiScenarioReportStructureService apiScenarioReportStructureService;
@Resource
private ApiScenarioReportStructureMapper apiScenarioReportStructureMapper;
@Resource
private ApiDefinitionExecResultMapper definitionExecResultMapper;
@Resource
private ExtApiDefinitionExecResultMapper extApiDefinitionExecResultMapper;
@Resource
private UiReportServiceProxy uiReportServiceProxy;
@Resource
private ExtApiScenarioReportResultMapper extApiScenarioReportResultMapper;
@Resource
private ScenarioExecutionInfoService scenarioExecutionInfoService;
@Resource
private TestPlanUiScenarioMapper testPlanUiScenarioMapper;
public void saveResult(ResultDTO dto) {
apiScenarioReportResultService.save(dto.getReportId(), dto.getRequestResults());
}
public void batchSaveResult(List<ResultDTO> dtos) {
apiScenarioReportResultService.batchSave(dtos);
}
public void saveUiResult(List<RequestResult> requestResults, ResultDTO dto) {
uiReportServiceProxy.saveUiResult(dto.getReportId(), requestResults);
}
public ApiScenarioReport testEnded(ResultDTO dto) {
if (!StringUtils.equals(dto.getReportType(), RunModeConstants.SET_REPORT.toString())) {
apiScenarioReportStructureService.update(dto.getReportId(), dto.getConsole());
}
ApiScenarioReport scenarioReport;
if (StringUtils.equals(dto.getRunMode(), ApiRunMode.SCENARIO_PLAN.name())) {
scenarioReport = updatePlanCase(dto);
} else if (StringUtils.equalsAny(dto.getRunMode(), ApiRunMode.SCHEDULE_SCENARIO_PLAN.name(), ApiRunMode.JENKINS_SCENARIO_PLAN.name())) {
scenarioReport = updateSchedulePlanCase(dto);
} else if (dto.getRunMode().startsWith("UI")) {
ApiScenarioReportResultExample example = new ApiScenarioReportResultExample();
example.createCriteria().andReportIdEqualTo(dto.getReportId());
scenarioReport = updateUiScenario(apiScenarioReportResultMapper.selectByExample(example), dto);
} else {
scenarioReport = updateScenario(dto);
}
return scenarioReport;
}
public APIScenarioReportResult get(String reportId, boolean selectReportContent) {
APIScenarioReportResult reportResult = extApiScenarioReportMapper.get(reportId);
if (reportResult != null) {
if (reportResult.getReportVersion() != null && reportResult.getReportVersion() > 1) {
reportResult.setContent(JSON.toJSONString(apiScenarioReportStructureService.assembleReport(reportId, selectReportContent)));
} else {
ApiScenarioReportDetail detail = apiScenarioReportDetailMapper.selectByPrimaryKey(reportId);
if (detail != null && reportResult != null) {
reportResult.setContent(new String(detail.getContent(), StandardCharsets.UTF_8));
}
}
return reportResult;
}
APIScenarioReportResult result = this.getApiIntegrated(reportId);
return result;
}
/**
* CASE integrated report
*
* @param reportId
* @return
*/
public APIScenarioReportResult getApiIntegrated(String reportId) {
ApiDefinitionExecResultWithBLOBs result = definitionExecResultMapper.selectByPrimaryKey(reportId);
if (result != null) {
APIScenarioReportResult reportResult = new APIScenarioReportResult();
BeanUtils.copyBean(reportResult, result);
reportResult.setReportVersion(2);
reportResult.setTestId(reportId);
ApiScenarioReportDTO dto = apiScenarioReportStructureService.apiIntegratedReport(reportId);
apiScenarioReportStructureService.initProjectEnvironmentByEnvConfig(dto, result.getEnvConfig());
reportResult.setContent(JSON.toJSONString(dto));
return reportResult;
}
return null;
}
public List<APIScenarioReportResult> list(QueryAPIReportRequest request) {
request = this.initRequest(request);
request.setOrders(ServiceUtils.getDefaultOrder(request.getOrders()));
List<APIScenarioReportResult> list = extApiScenarioReportMapper.list(request);
List<String> userIds = list.stream().map(APIScenarioReportResult::getUserId)
.collect(Collectors.toList());
Map<String, User> userMap = ServiceUtils.getUserMap(userIds);
list.forEach(item -> {
User user = userMap.get(item.getUserId());
if (user != null)
item.setUserName(user.getName());
});
return list;
}
public QueryAPIReportRequest initRequest(QueryAPIReportRequest request) {
if (request != null) {
if (MapUtils.isNotEmpty(request.getFilters()) && request.getFilters().containsKey("trigger_mode")
&& CollectionUtils.isNotEmpty(request.getFilters().get("trigger_mode"))
&& request.getFilters().get("trigger_mode").contains("API") && !request.getFilters().get("trigger_mode").contains(ReportTriggerMode.JENKINS_RUN_TEST_PLAN.name())) {
request.getFilters().get("trigger_mode").add(ReportTriggerMode.JENKINS_RUN_TEST_PLAN.name());
}
}
return request;
}
public List<String> idList(QueryAPIReportRequest request) {
request = this.initRequest(request);
request.setOrders(ServiceUtils.getDefaultOrder(request.getOrders()));
return extApiScenarioReportMapper.idList(request);
}
private void checkNameExist(APIScenarioReportResult request) {
ApiScenarioReportExample example = new ApiScenarioReportExample();
example.createCriteria().andNameEqualTo(request.getName()).andProjectIdEqualTo(request.getProjectId()).andExecuteTypeEqualTo(ExecuteType.Saved.name()).andIdNotEqualTo(request.getId());
if (apiScenarioReportMapper.countByExample(example) > 0) {
MSException.throwException(Translator.get("load_test_already_exists"));
}
}
public APIScenarioReportResult init(String scenarioIds, String reportName, String status, String scenarioNames, String triggerMode, String projectId, String userID) {
APIScenarioReportResult report = new APIScenarioReportResult();
if (triggerMode.equals(ApiRunMode.SCENARIO.name()) || triggerMode.equals(ApiRunMode.DEFINITION.name())) {
triggerMode = ReportTriggerMode.MANUAL.name();
}
report.setId(UUID.randomUUID().toString());
report.setName(reportName);
report.setCreateTime(System.currentTimeMillis());
report.setUpdateTime(System.currentTimeMillis());
report.setStatus(status);
if (StringUtils.isNotEmpty(userID)) {
report.setUserId(userID);
} else {
report.setUserId(SessionUtils.getUserId());
}
report.setTriggerMode(triggerMode);
report.setExecuteType(ExecuteType.Saved.name());
report.setProjectId(projectId);
report.setScenarioName(scenarioNames);
report.setScenarioId(scenarioIds);
if (StringUtils.isNotEmpty(report.getTriggerMode()) && report.getTriggerMode().equals("CASE")) {
report.setTriggerMode(TriggerMode.MANUAL.name());
}
apiScenarioReportMapper.insert(report);
return report;
}
public ApiScenarioReportWithBLOBs editReport(String reportType, String reportId, String status, String runMode) {
ApiScenarioReportWithBLOBs report = apiScenarioReportMapper.selectByPrimaryKey(reportId);
if (report == null) {
report = new ApiScenarioReportWithBLOBs();
report.setId(reportId);
}
if (StringUtils.equals(reportType, RunModeConstants.SET_REPORT.toString())) {
return report;
}
if (StringUtils.equals(runMode, "CASE")) {
report.setTriggerMode(TriggerMode.MANUAL.name());
}
report.setStatus(status);
report.setName(report.getScenarioName() + "-" + DateUtils.getTimeStr(System.currentTimeMillis()));
report.setEndTime(System.currentTimeMillis());
report.setUpdateTime(System.currentTimeMillis());
if (StringUtils.isNotEmpty(report.getTriggerMode()) && report.getTriggerMode().equals("CASE")) {
report.setTriggerMode(TriggerMode.MANUAL.name());
}
if (report.getExecuteType().equals(ExecuteType.Debug.name()) &&
report.getReportType().equals(ReportTypeConstants.UI_INDEPENDENT.name())) {
return report;
}
apiScenarioReportMapper.updateByPrimaryKeySelective(report);
return report;
}
public ApiScenarioReport updateReport(APIScenarioReportResult test) {
checkNameExist(test);
ApiScenarioReportWithBLOBs report = new ApiScenarioReportWithBLOBs();
report.setId(test.getId());
report.setProjectId(test.getProjectId());
report.setName(test.getName());
report.setScenarioName(test.getScenarioName());
report.setScenarioId(test.getScenarioId());
report.setTriggerMode(test.getTriggerMode());
report.setDescription(test.getDescription());
report.setEndTime(System.currentTimeMillis());
report.setUpdateTime(System.currentTimeMillis());
report.setStatus(test.getStatus());
report.setUserId(test.getUserId());
report.setExecuteType(test.getExecuteType());
if (StringUtils.isNotEmpty(report.getTriggerMode()) && report.getTriggerMode().equals("CASE")) {
report.setTriggerMode(TriggerMode.MANUAL.name());
}
apiScenarioReportMapper.updateByPrimaryKeySelective(report);
return report;
}
public ApiScenarioReport updatePlanCase(ResultDTO dto) {
String status = getStatus(dto);
ApiScenarioReport report = editReport(dto.getReportType(), dto.getReportId(), status, dto.getRunMode());
TestPlanApiScenario testPlanApiScenario = testPlanApiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (testPlanApiScenario != null) {
if (report != null) {
testPlanApiScenario.setLastResult(report.getStatus());
} else {
testPlanApiScenario.setLastResult(status);
}
long successSize = dto.getRequestResults().stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
String passRate = new DecimalFormat("0%").format((float) successSize / dto.getRequestResults().size());
testPlanApiScenario.setPassRate(passRate);
testPlanApiScenario.setReportId(dto.getReportId());
testPlanApiScenario.setUpdateTime(System.currentTimeMillis());
testPlanApiScenarioMapper.updateByPrimaryKeySelective(testPlanApiScenario);
scenarioExecutionInfoService.insertExecutionInfo(testPlanApiScenario.getId(), status, report.getTriggerMode());
ApiScenario scenario = apiScenarioMapper.selectByPrimaryKey(testPlanApiScenario.getApiScenarioId());
if (scenario != null) {
scenario.setLastResult(StringUtils.endsWithIgnoreCase(status, ScenarioStatus.Error.name())
? ScenarioStatus.Fail.name() : status);
scenario.setPassRate(passRate);
scenario.setReportId(dto.getReportId());
int executeTimes = 0;
if (scenario.getExecuteTimes() != null) {
executeTimes = scenario.getExecuteTimes().intValue();
}
scenario.setExecuteTimes(executeTimes + 1);
apiScenarioMapper.updateByPrimaryKey(scenario);
}
}
return report;
}
public ApiScenarioReport updateSchedulePlanCase(ResultDTO dto) {
List<String> testPlanReportIdList = new ArrayList<>();
StringBuilder scenarioNames = new StringBuilder();
String status = getStatus(dto);
ApiScenarioReportWithBLOBs report = editReport(dto.getReportType(), dto.getReportId(), status, dto.getRunMode());
if (report != null) {
if (StringUtils.isNotEmpty(dto.getTestPlanReportId()) && !testPlanReportIdList.contains(dto.getTestPlanReportId())) {
testPlanReportIdList.add(dto.getTestPlanReportId());
}
TestPlanApiScenario testPlanApiScenario = testPlanApiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (testPlanApiScenario != null) {
report.setScenarioId(testPlanApiScenario.getApiScenarioId());
report.setEndTime(System.currentTimeMillis());
apiScenarioReportMapper.updateByPrimaryKeySelective(report);
testPlanApiScenario.setLastResult(report.getStatus());
long successSize = dto.getRequestResults().stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
String passRate = new DecimalFormat("0%").format((float) successSize / dto.getRequestResults().size());
testPlanApiScenario.setPassRate(passRate);
testPlanApiScenario.setReportId(report.getId());
report.setEndTime(System.currentTimeMillis());
testPlanApiScenario.setUpdateTime(System.currentTimeMillis());
testPlanApiScenarioMapper.updateByPrimaryKeySelective(testPlanApiScenario);
scenarioNames.append(report.getName()).append(",");
ApiScenario scenario = apiScenarioMapper.selectByPrimaryKey(testPlanApiScenario.getApiScenarioId());
if (scenario != null) {
scenario.setLastResult(StringUtils.endsWithIgnoreCase(status, ScenarioStatus.Error.name())
? ScenarioStatus.Fail.name() : status);
scenario.setPassRate(passRate);
scenario.setReportId(report.getId());
int executeTimes = 0;
if (scenario.getExecuteTimes() != null) {
executeTimes = scenario.getExecuteTimes().intValue();
}
scenario.setExecuteTimes(executeTimes + 1);
apiScenarioMapper.updateByPrimaryKey(scenario);
}
}
}
return report;
}
private String getIntegrationReportStatus(List<String> reportStatus) {
boolean hasError = false, hasErrorReport = false, hasUnExecute = false, hasOtherStatus = false, hasStop = false;
if (CollectionUtils.isEmpty(reportStatus)) {
hasUnExecute = true;
} else {
for (String status : reportStatus) {
if (StringUtils.equalsIgnoreCase(status, ExecuteResult.SCENARIO_ERROR.toString())) {
hasError = true;
} else if (StringUtils.equalsIgnoreCase(status, ExecuteResult.ERROR_REPORT_RESULT.toString())) {
hasErrorReport = true;
} else if (StringUtils.equalsIgnoreCase(status, ExecuteResult.STOP.toString())) {
hasStop = true;
} else if (StringUtils.equalsIgnoreCase(status, ExecuteResult.UN_EXECUTE.toString())) {
hasUnExecute = true;
} else {
hasOtherStatus = true;
}
}
if (hasError || hasErrorReport || hasOtherStatus) {
hasUnExecute = false;
hasStop = false;
}
}
return hasError ? ScenarioStatus.Error.name() :
hasErrorReport ? ExecuteResult.ERROR_REPORT_RESULT.toString() :
hasStop ? ExecuteResult.STOP.toString() :
hasUnExecute ? ExecuteResult.UN_EXECUTE.toString() : ScenarioStatus.Success.name();
}
public void margeReport(String reportId, String runMode, String console) {
if (StringUtils.equalsIgnoreCase(runMode, ApiRunMode.DEFINITION.name())) {
ApiDefinitionExecResultWithBLOBs result = definitionExecResultMapper.selectByPrimaryKey(reportId);
if (!StringUtils.equalsAnyIgnoreCase(result.getStatus(), APITestStatus.Rerunning.name())) {
result.setEndTime(System.currentTimeMillis());
}
List<String> statusList = extApiDefinitionExecResultMapper.selectDistinctStatusByReportId(reportId);
result.setStatus(this.getIntegrationReportStatus(statusList));
definitionExecResultMapper.updateByPrimaryKeySelective(result);
} else {
ApiScenarioReport report = apiScenarioReportMapper.selectByPrimaryKey(reportId);
if (report != null) {
if (!StringUtils.equalsAnyIgnoreCase(report.getStatus(), APITestStatus.Rerunning.name())) {
report.setEndTime(System.currentTimeMillis());
}
List<String> statusList = extApiScenarioReportResultMapper.selectDistinctStatusByReportId(reportId);
report.setStatus(this.getIntegrationReportStatus(statusList));
apiScenarioReportMapper.updateByPrimaryKey(report);
scenarioExecutionInfoService.insertExecutionInfoByScenarioIds(report.getScenarioId(), report.getStatus(), report.getTriggerMode());
}
}
console = StringUtils.isNotEmpty(console) ? console : FixedCapacityUtils.getJmeterLogger(reportId, true);
if (StringUtils.isNotEmpty(console)) {
apiScenarioReportStructureService.update(reportId, console);
}
if (FixedCapacityUtils.jmeterLogTask.containsKey(reportId)) {
FixedCapacityUtils.jmeterLogTask.remove(reportId);
}
}
public ApiScenarioReport updateScenario(ResultDTO dto) {
String status = getStatus(dto);
ApiScenarioReport report = editReport(dto.getReportType(), dto.getReportId(), status, dto.getRunMode());
ApiScenarioWithBLOBs scenario = apiScenarioMapper.selectByPrimaryKey(dto.getTestId());
if (scenario == null) {
scenario = apiScenarioMapper.selectByPrimaryKey(report.getScenarioId());
}
if (scenario != null) {
scenario.setLastResult(StringUtils.endsWithIgnoreCase(status, ScenarioStatus.Error.name())
? ScenarioStatus.Fail.name() : status);
long successSize = dto.getRequestResults().stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
if (dto.getRequestResults().size() == 0) {
scenario.setPassRate("0%");
} else {
scenario.setPassRate(new DecimalFormat("0%").format((float) successSize / dto.getRequestResults().size()));
}
scenario.setReportId(dto.getReportId());
int executeTimes = 0;
if (scenario.getExecuteTimes() != null) {
executeTimes = scenario.getExecuteTimes().intValue();
}
scenario.setExecuteTimes(executeTimes + 1);
apiScenarioMapper.updateByPrimaryKey(scenario);
}
if (scenario != null && report != null) {
sendNotice(scenario, report);
}
return report;
}
private boolean updateUiScenario(List<ApiScenarioReportResult> requestResults, ResultDTO dto, long errorSize, String status, ApiScenarioReport report, UiScenarioWithBLOBs scenario) {
if (StringUtils.equalsAnyIgnoreCase(status, ExecuteResult.ERROR_REPORT_RESULT.toString())) {
scenario.setLastResult(status);
} else {
scenario.setLastResult(errorSize > 0 ? "Fail" : ScenarioStatus.Success.name());
}
long successSize = requestResults.stream().filter(requestResult -> StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Success.name())).count();
scenario.setPassRate(new DecimalFormat("0%").format((float) successSize / requestResults.size()));
scenario.setReportId(dto.getReportId());
int executeTimes = 0;
if (scenario.getExecuteTimes() != null) {
executeTimes = scenario.getExecuteTimes().intValue();
}
scenario.setExecuteTimes(executeTimes + 1);
if (report.getExecuteType().equals(ExecuteType.Debug.name()) &&
report.getReportType().equals(ReportTypeConstants.UI_INDEPENDENT.name())) {
return false;
}
uiScenarioMapper.updateByPrimaryKey(scenario);
return true;
}
public String getEnvironment(ApiScenarioWithBLOBs apiScenario) {
String environment = "Not configured";
String environmentType = apiScenario.getEnvironmentType();
if (StringUtils.equals(environmentType, EnvironmentType.JSON.name()) && StringUtils.isNotEmpty(apiScenario.getEnvironmentJson())) {
String environmentJson = apiScenario.getEnvironmentJson();
JSONObject jsonObject = JSON.parseObject(environmentJson, Feature.DisableSpecialKeyDetect);
ApiTestEnvironmentExample example = new ApiTestEnvironmentExample();
List<String> collect = jsonObject.values().stream().map(Object::toString).collect(Collectors.toList());
collect.add("-1");
example.createCriteria().andIdIn(collect);
List<ApiTestEnvironment> envs = apiTestEnvironmentMapper.selectByExample(example);
String env = envs.stream().map(ApiTestEnvironment::getName).collect(Collectors.joining(","));
if (StringUtils.isNotBlank(env)) {
environment = env;
}
}
if (StringUtils.equals(environmentType, EnvironmentType.GROUP.name())) {
String environmentGroupId = apiScenario.getEnvironmentGroupId();
EnvironmentGroup environmentGroup = environmentGroupMapper.selectByPrimaryKey(environmentGroupId);
if (environmentGroup != null) {
environment = environmentGroup.getName();
}
}
return environment;
}
private void sendNotice(ApiScenarioWithBLOBs scenario, ApiScenarioReport result) {
BeanMap beanMap = new BeanMap(scenario);
String event;
String status;
if (StringUtils.equals(scenario.getLastResult(), ScenarioStatus.Success.name())) {
event = NoticeConstants.Event.EXECUTE_SUCCESSFUL;
status = "successful";
} else {
event = NoticeConstants.Event.EXECUTE_FAILED;
status = "failed";
}
String userId = result.getCreateUser();
UserDTO userDTO = userService.getUserDTO(userId);
SystemParameterService systemParameterService = CommonBeanFactory.getBean(SystemParameterService.class);
assert systemParameterService != null;
Map paramMap = new HashMap<>(beanMap);
paramMap.put("operator", userDTO.getName());
paramMap.put("status", scenario.getLastResult());
paramMap.put("environment", getEnvironment(scenario));
BaseSystemConfigDTO baseSystemConfigDTO = systemParameterService.getBaseInfo();
String reportUrl = baseSystemConfigDTO.getUrl() + "/";
paramMap.put("reportUrl", reportUrl);
String context = "${operator} executed API automation " + status + ": ${name}";
NoticeModel noticeModel = NoticeModel.builder()
.operator(userId)
.context(context)
.subject("API automation notification")
.paramMap(paramMap)
.event(event)
.build();
Project project = projectMapper.selectByPrimaryKey(scenario.getProjectId());
noticeSendService.send(project, NoticeConstants.TaskType.API_AUTOMATION_TASK, noticeModel);
}
public String update(APIScenarioReportResult test) {
ApiScenarioReport report = updateReport(test);
ApiScenarioReportDetail detail = apiScenarioReportDetailMapper.selectByPrimaryKey(test.getId());
if (detail == null) {
detail = new ApiScenarioReportDetail();
detail.setContent(test.getContent().getBytes(StandardCharsets.UTF_8));
detail.setReportId(report.getId());
detail.setProjectId(test.getProjectId());
apiScenarioReportDetailMapper.insert(detail);
} else {
detail.setContent(test.getContent().getBytes(StandardCharsets.UTF_8));
detail.setReportId(report.getId());
detail.setProjectId(test.getProjectId());
apiScenarioReportDetailMapper.updateByPrimaryKey(detail);
}
return report.getId();
}
public static List<String> getReportIds(String content) {
try {
return JSON.parseObject(content, List.class);
} catch (Exception e) {
return null;
}
}
public void delete(DeleteAPIReportRequest request) {
ApiScenarioReport report = apiScenarioReportMapper.selectByPrimaryKey(request.getId());
deleteScenarioReportResource(request.getId());
deleteApiDefinitionResult(request.getId());
if (report != null && StringUtils.isNotEmpty(report.getScenarioId())) {
List<String> list = getReportIds(report.getScenarioId());
if (CollectionUtils.isNotEmpty(list)) {
APIReportBatchRequest reportRequest = new APIReportBatchRequest();
reportRequest.setIsUi(request.getIsUi());
reportRequest.setIds(list);
this.deleteAPIReportBatch(reportRequest);
}
}
}
public void deleteScenarioReportResource(String id) {
apiScenarioReportMapper.deleteByPrimaryKey(id);
apiScenarioReportDetailMapper.deleteByPrimaryKey(id);
ApiScenarioReportResultExample example = new ApiScenarioReportResultExample();
example.createCriteria().andReportIdEqualTo(id);
apiScenarioReportResultMapper.deleteByExample(example);
ApiScenarioReportStructureExample structureExample = new ApiScenarioReportStructureExample();
structureExample.createCriteria().andReportIdEqualTo(id);
apiScenarioReportStructureMapper.deleteByExample(structureExample);
}
public void delete(String id) {
apiScenarioReportDetailMapper.deleteByPrimaryKey(id);
apiScenarioReportMapper.deleteByPrimaryKey(id);
ApiScenarioReportResultExample example = new ApiScenarioReportResultExample();
example.createCriteria().andReportIdEqualTo(id);
apiScenarioReportResultMapper.deleteByExample(example);
ApiScenarioReportStructureExample structureExample = new ApiScenarioReportStructureExample();
structureExample.createCriteria().andReportIdEqualTo(id);
apiScenarioReportStructureMapper.deleteByExample(structureExample);
ApiDefinitionExecResultExample definitionExecResultExample = new ApiDefinitionExecResultExample();
definitionExecResultExample.createCriteria().andIdEqualTo(id);
definitionExecResultMapper.deleteByExample(definitionExecResultExample);
ApiDefinitionExecResultExample execResultExample = new ApiDefinitionExecResultExample();
execResultExample.createCriteria().andIntegratedReportIdEqualTo(id);
definitionExecResultMapper.deleteByExample(execResultExample);
}
public void deleteByIds(List<String> ids) {
if (CollectionUtils.isNotEmpty(ids)) {
deleteScenarioReportByIds(ids);
deleteApiDefinitionResultByIds(ids);
}
}
public void deleteAPIReportBatch(APIReportBatchRequest reportRequest) {
List<String> ids = getIdsByDeleteBatchRequest(reportRequest);
ids = batchDeleteReportResource(reportRequest, ids, true);
if (!ids.isEmpty()) {
deleteScenarioReportByIds(ids);
deleteApiDefinitionResultByIds(ids);
}
}
public void deleteScenarioReportByIds(List<String> ids) {
ApiScenarioReportDetailExample detailExample = new ApiScenarioReportDetailExample();
detailExample.createCriteria().andReportIdIn(ids);
apiScenarioReportDetailMapper.deleteByExample(detailExample);
ApiScenarioReportExample apiTestReportExample = new ApiScenarioReportExample();
apiTestReportExample.createCriteria().andIdIn(ids);
apiScenarioReportMapper.deleteByExample(apiTestReportExample);
ApiScenarioReportResultExample reportResultExample = new ApiScenarioReportResultExample();
reportResultExample.createCriteria().andReportIdIn(ids);
apiScenarioReportResultMapper.deleteByExample(reportResultExample);
ApiScenarioReportStructureExample structureExample = new ApiScenarioReportStructureExample();
structureExample.createCriteria().andReportIdIn(ids);
apiScenarioReportStructureMapper.deleteByExample(structureExample);
}
private void deleteApiDefinitionResultByIds(List<String> ids) {
ApiDefinitionExecResultExample definitionExecResultExample = new ApiDefinitionExecResultExample();
definitionExecResultExample.createCriteria().andIdIn(ids);
definitionExecResultMapper.deleteByExample(definitionExecResultExample);
ApiDefinitionExecResultExample execResultExample = new ApiDefinitionExecResultExample();
execResultExample.createCriteria().andIntegratedReportIdIn(ids);
definitionExecResultMapper.deleteByExample(execResultExample);
}
private void deleteApiDefinitionResult(String id) {
ApiDefinitionExecResultExample definitionExecResultExample = new ApiDefinitionExecResultExample();
definitionExecResultExample.createCriteria().andIdEqualTo(id);
definitionExecResultMapper.deleteByExample(definitionExecResultExample);
ApiDefinitionExecResultExample execResultExample = new ApiDefinitionExecResultExample();
execResultExample.createCriteria().andIntegratedReportIdEqualTo(id);
definitionExecResultMapper.deleteByExample(execResultExample);
}
public List<String> getIdsByDeleteBatchRequest(APIReportBatchRequest reportRequest) {
List<String> ids = reportRequest.getIds();
if (reportRequest.isSelectAllDate()) {
ids = this.idList(reportRequest);
if (reportRequest.getUnSelectIds() != null) {
ids.removeAll(reportRequest.getUnSelectIds());
}
}
return ids;
}
public List<String> batchDeleteReportResource(APIReportBatchRequest reportRequest, List<String> ids, boolean deleteApiResult) {
List<String> myList = reportRequest.getIds().stream().distinct().collect(Collectors.toList());
reportRequest.setIds(myList);
int handleCount = 5000;
while (ids.size() > handleCount) {
List<String> handleIdList = new ArrayList<>(handleCount);
List<String> otherIdList = new ArrayList<>();
for (int index = 0; index < ids.size(); index++) {
if (index < handleCount) {
handleIdList.add(ids.get(index));
} else {
otherIdList.add(ids.get(index));
}
}
deleteScenarioReportByIds(handleIdList);
if (deleteApiResult) {
deleteApiDefinitionResultByIds(handleIdList);
}
ids = otherIdList;
}
return ids;
}
public long countByProjectIdAndCreateAndByScheduleInThisWeek(String projectId) {
Map<String, Date> startAndEndDateInWeek = DateUtils.getWeedFirstTimeAndLastTime(new Date());
Date firstTime = startAndEndDateInWeek.get("firstTime");
Date lastTime = startAndEndDateInWeek.get("lastTime");
if (firstTime == null || lastTime == null) {
return 0;
} else {
return extApiScenarioReportMapper.countByProjectIdAndCreateAndByScheduleInThisWeek(projectId, firstTime.getTime(), lastTime.getTime());
}
}
public long countByProjectIdAndCreateInThisWeek(String projectId) {
Map<String, Date> startAndEndDateInWeek = DateUtils.getWeedFirstTimeAndLastTime(new Date());
Date firstTime = startAndEndDateInWeek.get("firstTime");
Date lastTime = startAndEndDateInWeek.get("lastTime");
if (firstTime == null || lastTime == null) {
return 0;
} else {
return extApiScenarioReportMapper.countByProjectIdAndCreateInThisWeek(projectId, firstTime.getTime(), lastTime.getTime());
}
}
public List<ApiDataCountResult> countByProjectIdGroupByExecuteResult(String projectId) {
return extApiScenarioReportMapper.countByProjectIdGroupByExecuteResult(projectId);
}
public List<ApiScenarioReport> selectLastReportByIds(List<String> ids) {
if (!ids.isEmpty()) {
return extApiScenarioReportMapper.selectLastReportByIds(ids);
} else {
return new ArrayList<>(0);
}
}
public String getLogDetails(String id) {
ApiScenarioReport bloBs = apiScenarioReportMapper.selectByPrimaryKey(id);
if (bloBs != null) {
List<DetailColumn> columns = ReflexObjectUtil.getColumns(bloBs, ModuleReference.moduleColumns);
OperatingLogDetails details = new OperatingLogDetails(JSON.toJSONString(id), bloBs.getProjectId(), bloBs.getName(), bloBs.getCreateUser(), columns);
return JSON.toJSONString(details);
}
return null;
}
public String getLogDetails(List<String> ids) {
if (org.apache.commons.collections.CollectionUtils.isNotEmpty(ids)) {
ApiScenarioReportExample example = new ApiScenarioReportExample();
example.createCriteria().andIdIn(ids);
List<ApiScenarioReport> reportList = apiScenarioReportMapper.selectByExample(example);
List<String> names = reportList.stream().map(ApiScenarioReport::getName).collect(Collectors.toList());
OperatingLogDetails details = new OperatingLogDetails(JSON.toJSONString(ids), reportList.get(0).getProjectId(), String.join(",", names), reportList.get(0).getCreateUser(), new LinkedList<>());
return JSON.toJSONString(details);
}
return null;
}
public List<ApiScenarioReport> getByIds(List<String> ids) {
if (org.apache.commons.collections.CollectionUtils.isNotEmpty(ids)) {
ApiScenarioReportExample example = new ApiScenarioReportExample();
example.createCriteria().andIdIn(ids);
return apiScenarioReportMapper.selectByExample(example);
}
return null;
}
public List<ApiReportCountDTO> countByApiScenarioId() {
return extApiScenarioReportMapper.countByApiScenarioId();
}
public Map<String, String> getReportStatusByReportIds(Collection<String> values) {
if (CollectionUtils.isEmpty(values)) {
return new HashMap<>();
}
Map<String, String> map = new HashMap<>();
List<ApiScenarioReport> reportList = extApiScenarioReportMapper.selectStatusByIds(values);
for (ApiScenarioReport report : reportList) {
map.put(report.getId(), report.getStatus());
}
return map;
}
public APIScenarioReportResult init(String id, String scenarioId, String scenarioName, String triggerMode, String execType, String projectId, String userID, RunModeConfigDTO config) {
APIScenarioReportResult report = new APIScenarioReportResult();
if (triggerMode.equals(ApiRunMode.SCENARIO.name()) || triggerMode.equals(ApiRunMode.DEFINITION.name())) {
triggerMode = ReportTriggerMode.MANUAL.name();
}
report.setId(id);
report.setTestId(id);
if (StringUtils.isNotEmpty(scenarioName)) {
scenarioName = scenarioName.length() >= 3000 ? scenarioName.substring(0, 2000) : scenarioName;
report.setName(scenarioName);
} else {
report.setName("Scenario debugging");
}
report.setUpdateTime(System.currentTimeMillis());
report.setCreateTime(System.currentTimeMillis());
String status = config != null && StringUtils.equals(config.getMode(), RunModeConstants.SERIAL.toString())
? APITestStatus.Waiting.name() : APITestStatus.Running.name();
report.setStatus(status);
if (StringUtils.isNotEmpty(userID)) {
report.setUserId(userID);
report.setCreateUser(userID);
} else {
report.setUserId(SessionUtils.getUserId());
report.setCreateUser(SessionUtils.getUserId());
}
if (config != null && StringUtils.isNotBlank(config.getResourcePoolId())) {
report.setActuator(config.getResourcePoolId());
} else {
report.setActuator("LOCAL");
}
report.setTriggerMode(triggerMode);
report.setReportVersion(2);
report.setExecuteType(execType);
report.setProjectId(projectId);
report.setScenarioName(scenarioName);
report.setScenarioId(scenarioId);
if (config != null) {
report.setEnvConfig(JSON.toJSONString(config));
}
report.setReportType(ReportTypeConstants.SCENARIO_INDEPENDENT.name());
return report;
}
/**
 * Returns the correct report status.
 *
 * @param dto the result returned by JMeter
 * @return the report status
 */
private String getStatus(ResultDTO dto) {
if (MapUtils.isNotEmpty(dto.getArbitraryData()) && dto.getArbitraryData().containsKey("REPORT_STATUS")) {
return String.valueOf(dto.getArbitraryData().get("REPORT_STATUS"));
}
long errorSize = dto.getRequestResults().stream().filter(requestResult ->
StringUtils.equalsIgnoreCase(requestResult.getStatus(), ScenarioStatus.Error.name())).count();
long errorReportResultSize = dto.getRequestResults().stream().filter(requestResult ->
StringUtils.equalsIgnoreCase(requestResult.getStatus(), ExecuteResult.ERROR_REPORT_RESULT.toString())).count();
if (StringUtils.isNotEmpty(dto.getRunMode()) && dto.getRunMode().startsWith("UI")) {
try {
errorSize = dto.getRequestResults().stream().filter(requestResult ->
StringUtils.isNotEmpty(requestResult.getResponseResult().getHeaders())
&& JSONArray.parseArray(requestResult.getResponseResult().getHeaders()).stream().filter(
r -> ((JSONObject) r).containsKey("success") && !((JSONObject) r).getBoolean("success")
).count() > 0)
.count();
} catch (Exception e) {
errorSize = 1;
}
}
String status = dto.getRequestResults().isEmpty() ? ExecuteResult.UN_EXECUTE.toString() : ScenarioStatus.Success.name();
if (errorSize > 0) {
status = ScenarioStatus.Error.name();
} else if (errorReportResultSize > 0) {
status = ExecuteResult.ERROR_REPORT_RESULT.toString();
}
if (dto != null && dto.getArbitraryData() != null && dto.getArbitraryData().containsKey("TIMEOUT") && (Boolean) dto.getArbitraryData().get("TIMEOUT")) {
LoggerUtil.info("Resource " + dto.getTestId() + " execution timed out", dto.getReportId());
status = ScenarioStatus.Timeout.name();
}
return status;
}
public List<PlanReportCaseDTO> selectForPlanReport(List<String> reportIds) {
return extApiScenarioReportMapper.selectForPlanReport(reportIds);
}
public void cleanUpReport(long time, String projectId) {
List<String> ids = extApiScenarioReportMapper.selectByProjectIdAndLessThanTime(projectId, time);
List<String> definitionExecIds = extApiDefinitionExecResultMapper.selectByProjectIdAndLessThanTime(projectId, time);
ids.addAll(definitionExecIds);
if (CollectionUtils.isNotEmpty(ids)) {
APIReportBatchRequest request = new APIReportBatchRequest();
request.setIds(ids);
request.setSelectAllDate(false);
deleteAPIReportBatch(request);
}
}
@Transactional(propagation = Propagation.REQUIRES_NEW)
public void batchSave(Map<String, RunModeDataDTO> executeQueue, String serialReportId, String runMode, List<MsExecResponseDTO> responseDTOS) {
List<APIScenarioReportResult> list = new LinkedList<>();
if (StringUtils.isEmpty(serialReportId)) {
for (String reportId : executeQueue.keySet()) {
APIScenarioReportResult report = executeQueue.get(reportId).getReport();
list.add(report);
responseDTOS.add(new MsExecResponseDTO(executeQueue.get(reportId).getTestId(), reportId, runMode));
}
if (CollectionUtils.isNotEmpty(list)) {
extApiScenarioReportMapper.sqlInsert(list);
}
}
}
public void reName(ApiScenarioReport reportRequest) {
if (StringUtils.equalsAnyIgnoreCase(reportRequest.getReportType(), ReportTypeConstants.API_INDEPENDENT.name(), ReportTypeConstants.API_INTEGRATED.name())) {
ApiDefinitionExecResultWithBLOBs result = definitionExecResultMapper.selectByPrimaryKey(reportRequest.getId());
if (result != null) {
result.setName(reportRequest.getName());
definitionExecResultMapper.updateByPrimaryKeySelective(result);
}
} else {
ApiScenarioReport apiTestReport = apiScenarioReportMapper.selectByPrimaryKey(reportRequest.getId());
if (apiTestReport != null) {
apiTestReport.setName(reportRequest.getName());
apiScenarioReportMapper.updateByPrimaryKey(apiTestReport);
}
}
}
public RequestResult selectReportContent(String stepId) {
return apiScenarioReportStructureService.selectReportContent(stepId);
}
public void cleanUpUiReportImg(List<String> ids) {
try {
if (ids != null && CollectionUtils.isNotEmpty(ids)) {
for (String id : ids) {
if (FileUtil.deleteDir(new File(FileUtils.UI_IMAGE_DIR + "/" + id))) {
LogUtil.info("Deleted UI report screenshots successfully, report ID: " + id);
}
}
}
} catch (Exception e) {
LogUtil.error(e.getMessage(), e);
MSException.throwException(e.getMessage());
}
}
}
|
nit: this can be replaced by a method reference `.map(DigitalTwinsResponse::getValue)`.
|
public Mono<String> getComponent(String digitalTwinId, String componentPath) {
return getComponentWithResponse(digitalTwinId, componentPath)
.map(response -> response.getValue());
}
|
.map(response -> response.getValue());
|
public Mono<String> getComponent(String digitalTwinId, String componentPath) {
return getComponentWithResponse(digitalTwinId, componentPath)
.map(DigitalTwinsResponse::getValue);
}
|
class to convert the relationship to.
* @param <T> The generic type to convert the relationship to.
* @return A {@link PagedFlux}
|
class to convert the relationship to.
* @param <T> The generic type to convert the relationship to.
* @return A {@link PagedFlux}
|
I would rethink the cli/batchMode sections here (relating to the other comment, too), and push them back into the Gradle/Maven plugin to emit help appropriately (when not coming via the cli).
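As a rough illustration of the suggestion, the hint printing could move to the build-tool side, with each front end choosing its own flag syntax. A minimal sketch, assuming a hypothetical `ListExtensionsMojo` and an assumed `listExtensions()` helper; this is not the actual plugin code:

import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;

// Hypothetical Maven-side hook: the shared command handler only lists the
// extensions, and the plugin emits Maven-flavoured hints afterwards.
public class ListExtensionsMojo extends AbstractMojo {
    @Override
    public void execute() throws MojoExecutionException {
        // listExtensions() is an assumed helper that invokes the shared handler.
        listExtensions();
        // Maven users get -D syntax; the CLI would print --flag syntax instead.
        getLog().info("To get more information, append `-Dformat=full` to your command line.");
    }

    private void listExtensions() throws MojoExecutionException {
        // invoke ListExtensionsCommandHandler here (omitted)
    }
}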
|
public QuarkusCommandOutcome execute(QuarkusCommandInvocation invocation) throws QuarkusCommandException {
final MessageWriter log = invocation.log();
final boolean all = invocation.getValue(ListExtensions.ALL, true);
final boolean installedOnly = invocation.getValue(ListExtensions.INSTALLED, false);
final boolean cli = invocation.getValue(ListExtensions.FROM_CLI, false);
final String format = invocation.getValue(ListExtensions.FORMAT, "");
final String search = invocation.getValue(ListExtensions.SEARCH, "*");
final String category = invocation.getValue(ListExtensions.CATEGORY, "");
final boolean batchMode = invocation.getValue(ListExtensions.BATCH_MODE, false);
final ExtensionManager extensionManager = invocation.getValue(ListExtensions.EXTENSION_MANAGER,
invocation.getQuarkusProject().getExtensionManager());
final Collection<Extension> extensions = search == null ? invocation.getExtensionsCatalog().getExtensions()
: QuarkusCommandHandlers.select(search, invocation.getExtensionsCatalog().getExtensions(), true)
.getExtensions();
if (extensions.isEmpty()) {
log.info("No extension found with pattern '%s'", search);
return QuarkusCommandOutcome.success();
}
if (!cli) {
String extensionStatus = all ? "available" : "installable";
if (installedOnly)
extensionStatus = "installed";
log.info("%nCurrent Quarkus extensions %s: ", extensionStatus);
}
BiConsumer<MessageWriter, Object[]> currentFormatter;
switch (format.toLowerCase()) {
case "full":
currentFormatter = this::fullFormatter;
log.info(String.format(FULL_FORMAT, "Status", "Extension", "ArtifactId", "Updated Version", "Guide"));
break;
case "origins":
currentFormatter = this::originsFormatter;
break;
case "concise":
currentFormatter = this::conciseFormatter;
break;
case "name":
default:
currentFormatter = this::nameFormatter;
break;
}
Map<ArtifactKey, ArtifactCoords> installedByKey;
try {
installedByKey = extensionManager.getInstalled().stream()
.collect(toMap(ArtifactCoords::getKey, Function.identity()));
} catch (IOException e) {
throw new QuarkusCommandException("Failed to determine the list of installed extensions", e);
}
Predicate<Extension> categoryFilter;
if (StringUtils.isNotBlank(category)) {
categoryFilter = e -> ExtensionProcessor.of(e).getCategories().contains(category);
} else {
categoryFilter = e -> true;
}
extensions.stream()
.filter(e -> !ExtensionProcessor.of(e).isUnlisted())
.filter(categoryFilter)
.sorted(Comparator.comparing(e -> e.getArtifact().getArtifactId()))
.forEach(e -> display(log, e, installedByKey.get(toKey(e)), all, installedOnly, currentFormatter));
final BuildTool buildTool = invocation.getQuarkusProject().getBuildTool();
boolean isGradle = BuildTool.GRADLE.equals(buildTool) || BuildTool.GRADLE_KOTLIN_DSL.equals(buildTool);
if (!cli) {
if ("concise".equalsIgnoreCase(format) || StringUtils.isBlank(format)) {
log.info("");
if (isGradle) {
log.info(MORE_INFO_HINT, "--format=full");
} else {
log.info(MORE_INFO_HINT, "-Dformat=full");
}
}
if (!installedOnly) {
if (StringUtils.isBlank(category)) {
log.info("");
if (isGradle) {
log.info(FILTER_HINT, "--category=\"categoryId\"");
} else {
log.info(FILTER_HINT, "-Dcategory=\"categoryId\"");
}
}
}
log.info("");
if (isGradle) {
log.info(ADD_EXTENSION_HINT, "build.gradle", "./gradlew addExtension --extensions=\"artifactId\"");
} else {
log.info(ADD_EXTENSION_HINT, "pom.xml", "./mvnw quarkus:add-extension -Dextensions=\"artifactId\"");
}
} else if (!batchMode) {
if (StringUtils.isBlank(format)) {
log.info("");
log.info(MORE_INFO_HINT, "--full");
}
if (!installedOnly) {
if (StringUtils.isBlank(category)) {
log.info("");
log.info(FILTER_HINT, "--category \"categoryId\"");
}
log.info("");
if (isGradle) {
log.info(ADD_EXTENSION_HINT, "build.gradle", "quarkus extension add \"artifactId\"");
} else {
log.info(ADD_EXTENSION_HINT, "pom.xml", "quarkus extension add \"artifactId\"");
}
}
}
return QuarkusCommandOutcome.success();
}
|
if ("concise".equalsIgnoreCase(format) || StringUtils.isBlank(format)) {
|
public QuarkusCommandOutcome execute(QuarkusCommandInvocation invocation) throws QuarkusCommandException {
final MessageWriter log = invocation.log();
final boolean all = invocation.getValue(ListExtensions.ALL, true);
final boolean installedOnly = invocation.getValue(ListExtensions.INSTALLED, false);
final boolean cli = invocation.getValue(ListExtensions.FROM_CLI, false);
final String format = invocation.getValue(ListExtensions.FORMAT, "");
final String search = invocation.getValue(ListExtensions.SEARCH, "*");
final String category = invocation.getValue(ListExtensions.CATEGORY, "");
final boolean batchMode = invocation.getValue(ListExtensions.BATCH_MODE, false);
final ExtensionManager extensionManager = invocation.getValue(ListExtensions.EXTENSION_MANAGER,
invocation.getQuarkusProject().getExtensionManager());
final Collection<Extension> extensions = search == null ? invocation.getExtensionsCatalog().getExtensions()
: QuarkusCommandHandlers.select(search, invocation.getExtensionsCatalog().getExtensions(), true)
.getExtensions();
if (extensions.isEmpty()) {
log.info("No extension found with pattern '%s'", search);
return QuarkusCommandOutcome.success();
}
if (!batchMode) {
String extensionStatus = all ? "available" : "installable";
if (installedOnly)
extensionStatus = "installed";
log.info("Current Quarkus extensions %s: ", extensionStatus);
log.info("");
}
BiConsumer<MessageWriter, Object[]> currentFormatter;
switch (format.toLowerCase()) {
case "full":
currentFormatter = this::fullFormatter;
log.info(String.format(FULL_FORMAT, "Status", "Extension", "ArtifactId", "Updated Version", "Guide"));
break;
case "origins":
currentFormatter = this::originsFormatter;
break;
case "concise":
currentFormatter = this::conciseFormatter;
break;
case "id":
default:
currentFormatter = this::nameFormatter;
break;
}
Map<ArtifactKey, ArtifactCoords> installedByKey;
try {
installedByKey = extensionManager.getInstalled().stream()
.collect(toMap(ArtifactCoords::getKey, Function.identity()));
} catch (IOException e) {
throw new QuarkusCommandException("Failed to determine the list of installed extensions", e);
}
Predicate<Extension> categoryFilter;
if (category != null && !category.isBlank()) {
categoryFilter = e -> ExtensionProcessor.of(e).getCategories().contains(category);
} else {
categoryFilter = e -> true;
}
extensions.stream()
.filter(e -> !ExtensionProcessor.of(e).isUnlisted())
.filter(categoryFilter)
.sorted(Comparator.comparing(e -> e.getArtifact().getArtifactId()))
.forEach(e -> display(log, e, installedByKey.get(toKey(e)), all, installedOnly, currentFormatter));
return QuarkusCommandOutcome.success();
}
|
class ListExtensionsCommandHandler implements QuarkusCommandHandler {
private static final String FULL_FORMAT = "%-8s %-50s %-50s %-25s%s";
private static final String CONCISE_FORMAT = "%-50s %-50s";
private static final String NAME_FORMAT = "%-50s";
private static final String ORIGINS_FORMAT = "%-50s %-60s %s";
private static final String MORE_INFO_HINT = "To get more information, append `%s` to your command line.";
private static final String FILTER_HINT = "To list only extensions from specific category, append " +
"`%s` to your command line.";
private static final String ADD_EXTENSION_HINT = "Add an extension to your project by adding the dependency to your " +
"%s or use `%s`";
private void conciseFormatter(MessageWriter writer, Object[] cols) {
Extension e = (Extension) cols[1];
writer.info(String.format(CONCISE_FORMAT, e.getName(), e.getArtifact().getArtifactId()));
}
private void fullFormatter(MessageWriter writer, Object[] cols) {
Extension e = (Extension) cols[1];
final String guide = getGuide(e);
writer.info(String.format(FULL_FORMAT, cols[0], e.getName(), e.getArtifact().getArtifactId(), cols[2],
guide == null ? "" : guide));
}
private void nameFormatter(MessageWriter writer, Object[] cols) {
Extension e = (Extension) cols[1];
writer.info(String.format(NAME_FORMAT, e.getArtifact().getArtifactId()));
}
private void originsFormatter(MessageWriter writer, Object[] cols) {
Extension e = (Extension) cols[1];
String origin = null;
int i = 0;
final List<ExtensionOrigin> origins = e.getOrigins();
while (i < origins.size() && origin == null) {
final ExtensionOrigin o = origins.get(i++);
if (o.isPlatform()) {
origin = o.getBom().toString();
}
}
writer.info(String.format(ORIGINS_FORMAT, e.getName(), e.getArtifact().getVersion(), origin == null ? "" : origin));
while (i < origins.size()) {
final ExtensionOrigin o = origins.get(i++);
if (o.isPlatform()) {
writer.info(String.format(ORIGINS_FORMAT, "", "", o.getBom().toString()));
}
}
}
private void display(MessageWriter messageWriter, final Extension e, final ArtifactCoords installed,
boolean all,
boolean installedOnly,
BiConsumer<MessageWriter, Object[]> formatter) {
if (installedOnly && installed == null) {
return;
}
if (!installedOnly && !all && installed != null) {
return;
}
String label = "";
String version = "";
if (installed != null) {
final String installedVersion = installed.getVersion();
if (installedVersion == null) {
label = "default";
version = e.getArtifact().getVersion();
} else if (installedVersion.equalsIgnoreCase(e.getArtifact().getVersion())) {
label = "custom";
version = installedVersion;
} else {
label = "custom*";
version = String.format("%s* <> %s", installedVersion, e.getArtifact().getVersion());
}
}
formatter.accept(messageWriter, new Object[] { label, e, version });
}
}
|
class ListExtensionsCommandHandler implements QuarkusCommandHandler {
private static final String FULL_FORMAT = "%-8s %-50s %-50s %-25s%s";
private static final String CONCISE_FORMAT = "%-50s %-50s";
private static final String NAME_FORMAT = "%-50s";
private static final String ORIGINS_FORMAT = "%-50s %-60s %s";
private void conciseFormatter(MessageWriter writer, Object[] cols) {
Extension e = (Extension) cols[1];
writer.info(String.format(CONCISE_FORMAT, e.getName(), e.getArtifact().getArtifactId()));
}
private void fullFormatter(MessageWriter writer, Object[] cols) {
Extension e = (Extension) cols[1];
final String guide = getGuide(e);
writer.info(String.format(FULL_FORMAT, cols[0], e.getName(), e.getArtifact().getArtifactId(), cols[2],
guide == null ? "" : guide));
}
private void nameFormatter(MessageWriter writer, Object[] cols) {
Extension e = (Extension) cols[1];
writer.info(String.format(NAME_FORMAT, e.getArtifact().getArtifactId()));
}
private void originsFormatter(MessageWriter writer, Object[] cols) {
Extension e = (Extension) cols[1];
String origin = null;
int i = 0;
final List<ExtensionOrigin> origins = e.getOrigins();
while (i < origins.size() && origin == null) {
final ExtensionOrigin o = origins.get(i++);
if (o.isPlatform()) {
origin = o.getBom().toString();
}
}
writer.info(String.format(ORIGINS_FORMAT, e.getName(), e.getArtifact().getVersion(), origin == null ? "" : origin));
while (i < origins.size()) {
final ExtensionOrigin o = origins.get(i++);
if (o.isPlatform()) {
writer.info(String.format(ORIGINS_FORMAT, "", "", o.getBom().toString()));
}
}
}
private void display(MessageWriter messageWriter, final Extension e, final ArtifactCoords installed,
boolean all,
boolean installedOnly,
BiConsumer<MessageWriter, Object[]> formatter) {
if (installedOnly && installed == null) {
return;
}
if (!installedOnly && !all && installed != null) {
return;
}
String label = "";
String version = "";
if (installed != null) {
final String installedVersion = installed.getVersion();
if (installedVersion == null) {
label = "default";
version = e.getArtifact().getVersion();
} else if (installedVersion.equalsIgnoreCase(e.getArtifact().getVersion())) {
label = "custom";
version = installedVersion;
} else {
label = "custom*";
version = String.format("%s* <> %s", installedVersion, e.getArtifact().getVersion());
}
}
formatter.accept(messageWriter, new Object[] { label, e, version });
}
}
|
Using a synchronized block on isReady works here; I don't see a deadlock. (1) If the first synchronized block runs first, it calls notify() and sets isReady to true, so when the second synchronized block executes it never reaches the wait() call because isReady is already true. (2) If the second synchronized block runs first, it calls wait() and gives up the lock; the first synchronized block then acquires the lock, sets isReady to true, and calls notify().
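A minimal, self-contained sketch of the same handshake (class and variable names here are illustrative, not taken from the test):

import java.util.concurrent.atomic.AtomicBoolean;

public class WaitNotifyHandshake {
    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean isReady = new AtomicBoolean(false);
        Thread waiter = new Thread(() -> {
            synchronized (isReady) {
                while (!isReady.get()) { // loop guards against spurious wakeups
                    try {
                        isReady.wait(); // releases the monitor while waiting
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        return;
                    }
                }
            }
            System.out.println("signal observed, proceeding");
        });
        waiter.start();
        synchronized (isReady) {
            isReady.set(true); // set the flag before notifying
            isReady.notify();  // wakes the waiter if it is already in wait()
        }
        waiter.join();
    }
}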
|
public void testCloseVisibleToAwaitCompletionCallerAndProducer() throws Exception {
BeamFnDataInboundObserver observer =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(DataEndpoint.create(TRANSFORM_ID, CODER, (value) -> {})),
Collections.emptyList());
AtomicBoolean isReady = new AtomicBoolean(false);
Future<?> future =
executor.submit(
() -> {
observer.accept(dataWith("ABC"));
synchronized (isReady) {
isReady.set(true);
isReady.notify();
}
assertThrows(
BeamFnDataInboundObserver.CloseException.class,
() -> {
while (true) {
observer.accept(dataWith("ABC"));
}
});
return null;
});
Future<?> future2 =
executor.submit(
() -> {
synchronized (isReady) {
while (!isReady.get()) {
isReady.wait();
}
}
observer.close();
return null;
});
assertThrows(BeamFnDataInboundObserver.CloseException.class, () -> observer.awaitCompletion());
future.get();
future2.get();
}
|
synchronized (isReady) {
|
public void testCloseVisibleToAwaitCompletionCallerAndProducer() throws Exception {
BeamFnDataInboundObserver observer =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(DataEndpoint.create(TRANSFORM_ID, CODER, (value) -> {})),
Collections.emptyList());
AtomicBoolean isReady = new AtomicBoolean(false);
Future<?> future =
executor.submit(
() -> {
observer.accept(dataWith("ABC"));
synchronized (isReady) {
isReady.set(true);
isReady.notify();
}
assertThrows(
BeamFnDataInboundObserver.CloseException.class,
() -> {
while (true) {
observer.accept(dataWith("ABC"));
}
});
return null;
});
Future<?> future2 =
executor.submit(
() -> {
synchronized (isReady) {
while (!isReady.get()) {
isReady.wait();
}
}
observer.close();
return null;
});
assertThrows(BeamFnDataInboundObserver.CloseException.class, () -> observer.awaitCompletion());
future.get();
future2.get();
}
|
class BeamFnDataInboundObserverTest {
private static final Coder<WindowedValue<String>> CODER =
WindowedValue.getFullCoder(StringUtf8Coder.of(), GlobalWindow.Coder.INSTANCE);
private static final String TRANSFORM_ID = "transformId";
private static final String TIMER_FAMILY_ID = "timerFamilyId";
@Rule
public final TestExecutorService executor = TestExecutors.from(Executors::newCachedThreadPool);
@Test
public void testConsumptionOfValuesHappensOnAwaitCompletionCallersThread() throws Exception {
Thread thread = Thread.currentThread();
Collection<WindowedValue<String>> values = new ArrayList<>();
Collection<WindowedValue<String>> timers = new ArrayList<>();
BeamFnDataInboundObserver observer =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(
DataEndpoint.create(
TRANSFORM_ID,
CODER,
(value) -> {
assertSame(thread, Thread.currentThread());
values.add(value);
})),
Arrays.asList(
TimerEndpoint.create(
TRANSFORM_ID,
TIMER_FAMILY_ID,
CODER,
(value) -> {
assertSame(thread, Thread.currentThread());
timers.add(value);
})));
Future<?> future =
executor.submit(
() -> {
observer.accept(dataWith("ABC", "DEF", "GHI"));
observer.accept(lastData());
observer.accept(timerWith("UVW"));
observer.accept(timerWith("XYZ"));
observer.accept(lastTimer());
return null;
});
observer.awaitCompletion();
assertThat(
values,
contains(
WindowedValue.valueInGlobalWindow("ABC"),
WindowedValue.valueInGlobalWindow("DEF"),
WindowedValue.valueInGlobalWindow("GHI")));
assertThat(
timers,
contains(
WindowedValue.valueInGlobalWindow("UVW"), WindowedValue.valueInGlobalWindow("XYZ")));
future.get();
}
@Test
public void testAwaitCompletionFailureVisibleToAwaitCompletionCallerAndProducer()
throws Exception {
BeamFnDataInboundObserver observer =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(
DataEndpoint.create(
TRANSFORM_ID,
CODER,
(value) -> {
throw new Exception("test consumer failed");
})),
Collections.emptyList());
Future<?> future =
executor.submit(
() -> {
observer.accept(dataWith("ABC"));
assertThrows(
"test consumer failed",
Exception.class,
() -> {
while (true) {
observer.accept(dataWith("ABC"));
}
});
return null;
});
assertThrows("test consumer failed", Exception.class, () -> observer.awaitCompletion());
future.get();
}
@Test
public void testBadProducerDataFailureVisibleToAwaitCompletionCallerAndProducer()
throws Exception {
BeamFnDataInboundObserver observer =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(DataEndpoint.create(TRANSFORM_ID, CODER, (value) -> {})),
Collections.emptyList());
Future<?> future =
executor.submit(
() -> {
observer.accept(timerWith("DEF"));
assertThrows(
"Unable to find inbound timer receiver for instruction",
IllegalStateException.class,
() -> {
while (true) {
observer.accept(dataWith("ABC"));
}
});
return null;
});
assertThrows(
"Unable to find inbound timer receiver for instruction",
IllegalStateException.class,
() -> observer.awaitCompletion());
future.get();
}
private BeamFnApi.Elements dataWith(String... values) throws Exception {
ByteStringOutputStream output = new ByteStringOutputStream();
for (String value : values) {
CODER.encode(WindowedValue.valueInGlobalWindow(value), output);
}
return BeamFnApi.Elements.newBuilder()
.addData(
BeamFnApi.Elements.Data.newBuilder()
.setTransformId(TRANSFORM_ID)
.setData(output.toByteString()))
.build();
}
private BeamFnApi.Elements lastData() throws Exception {
return BeamFnApi.Elements.newBuilder()
.addData(BeamFnApi.Elements.Data.newBuilder().setTransformId(TRANSFORM_ID).setIsLast(true))
.build();
}
private BeamFnApi.Elements timerWith(String... values) throws Exception {
ByteStringOutputStream output = new ByteStringOutputStream();
for (String value : values) {
CODER.encode(WindowedValue.valueInGlobalWindow(value), output);
}
return BeamFnApi.Elements.newBuilder()
.addTimers(
BeamFnApi.Elements.Timers.newBuilder()
.setTransformId(TRANSFORM_ID)
.setTimerFamilyId(TIMER_FAMILY_ID)
.setTimers(output.toByteString()))
.build();
}
private BeamFnApi.Elements lastTimer() throws Exception {
return BeamFnApi.Elements.newBuilder()
.addTimers(
BeamFnApi.Elements.Timers.newBuilder()
.setTransformId(TRANSFORM_ID)
.setTimerFamilyId(TIMER_FAMILY_ID)
.setIsLast(true))
.build();
}
}
|
class BeamFnDataInboundObserverTest {
private static final Coder<WindowedValue<String>> CODER =
WindowedValue.getFullCoder(StringUtf8Coder.of(), GlobalWindow.Coder.INSTANCE);
private static final String TRANSFORM_ID = "transformId";
private static final String TIMER_FAMILY_ID = "timerFamilyId";
@Rule
public final TestExecutorService executor = TestExecutors.from(Executors::newCachedThreadPool);
@Test
public void testConsumptionOfValuesHappensOnAwaitCompletionCallersThread() throws Exception {
Thread thread = Thread.currentThread();
Collection<WindowedValue<String>> values = new ArrayList<>();
Collection<WindowedValue<String>> timers = new ArrayList<>();
BeamFnDataInboundObserver observer =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(
DataEndpoint.create(
TRANSFORM_ID,
CODER,
(value) -> {
assertSame(thread, Thread.currentThread());
values.add(value);
})),
Arrays.asList(
TimerEndpoint.create(
TRANSFORM_ID,
TIMER_FAMILY_ID,
CODER,
(value) -> {
assertSame(thread, Thread.currentThread());
timers.add(value);
})));
Future<?> future =
executor.submit(
() -> {
observer.accept(dataWith("ABC", "DEF", "GHI"));
observer.accept(lastData());
observer.accept(timerWith("UVW"));
observer.accept(timerWith("XYZ"));
observer.accept(lastTimer());
return null;
});
observer.awaitCompletion();
assertThat(
values,
contains(
WindowedValue.valueInGlobalWindow("ABC"),
WindowedValue.valueInGlobalWindow("DEF"),
WindowedValue.valueInGlobalWindow("GHI")));
assertThat(
timers,
contains(
WindowedValue.valueInGlobalWindow("UVW"), WindowedValue.valueInGlobalWindow("XYZ")));
future.get();
}
@Test
public void testAwaitCompletionFailureVisibleToAwaitCompletionCallerAndProducer()
throws Exception {
BeamFnDataInboundObserver observer =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(
DataEndpoint.create(
TRANSFORM_ID,
CODER,
(value) -> {
throw new Exception("test consumer failed");
})),
Collections.emptyList());
Future<?> future =
executor.submit(
() -> {
observer.accept(dataWith("ABC"));
assertThrows(
"test consumer failed",
Exception.class,
() -> {
while (true) {
observer.accept(dataWith("ABC"));
}
});
return null;
});
assertThrows("test consumer failed", Exception.class, () -> observer.awaitCompletion());
future.get();
}
@Test
public void testBadProducerDataFailureVisibleToAwaitCompletionCallerAndProducer()
throws Exception {
BeamFnDataInboundObserver observer =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(DataEndpoint.create(TRANSFORM_ID, CODER, (value) -> {})),
Collections.emptyList());
Future<?> future =
executor.submit(
() -> {
observer.accept(timerWith("DEF"));
assertThrows(
"Unable to find inbound timer receiver for instruction",
IllegalStateException.class,
() -> {
while (true) {
observer.accept(dataWith("ABC"));
}
});
return null;
});
assertThrows(
"Unable to find inbound timer receiver for instruction",
IllegalStateException.class,
() -> observer.awaitCompletion());
future.get();
}
private BeamFnApi.Elements dataWith(String... values) throws Exception {
ByteStringOutputStream output = new ByteStringOutputStream();
for (String value : values) {
CODER.encode(WindowedValue.valueInGlobalWindow(value), output);
}
return BeamFnApi.Elements.newBuilder()
.addData(
BeamFnApi.Elements.Data.newBuilder()
.setTransformId(TRANSFORM_ID)
.setData(output.toByteString()))
.build();
}
private BeamFnApi.Elements lastData() throws Exception {
return BeamFnApi.Elements.newBuilder()
.addData(BeamFnApi.Elements.Data.newBuilder().setTransformId(TRANSFORM_ID).setIsLast(true))
.build();
}
private BeamFnApi.Elements timerWith(String... values) throws Exception {
ByteStringOutputStream output = new ByteStringOutputStream();
for (String value : values) {
CODER.encode(WindowedValue.valueInGlobalWindow(value), output);
}
return BeamFnApi.Elements.newBuilder()
.addTimers(
BeamFnApi.Elements.Timers.newBuilder()
.setTransformId(TRANSFORM_ID)
.setTimerFamilyId(TIMER_FAMILY_ID)
.setTimers(output.toByteString()))
.build();
}
private BeamFnApi.Elements lastTimer() throws Exception {
return BeamFnApi.Elements.newBuilder()
.addTimers(
BeamFnApi.Elements.Timers.newBuilder()
.setTransformId(TRANSFORM_ID)
.setTimerFamilyId(TIMER_FAMILY_ID)
.setIsLast(true))
.build();
}
}
|
`IOException`? (`org.apache.flink.runtime.checkpoint.channel.ChannelStateChunkReader#readChunk` can already handle `IOException`.)
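For context, a rough sketch of the call-site shape; this is a simplified stand-in for `ChannelStateChunkReader#readChunk`, not Flink's actual code. Since the caller already declares `IOException`, `recover(...)` can throw the checked exception directly instead of wrapping it in a `RuntimeException`:

// Simplified stand-in for the caller, not the real Flink implementation:
// readChunk already declares IOException, so a checked IOException thrown by
// recover(...) propagates without any wrapping.
void readChunk(ResultSubpartitionRecoveredStateHandler handler,
               ResultSubpartitionInfo info,
               Tuple2<BufferBuilder, BufferConsumer> buffer) throws IOException {
    handler.recover(info, buffer);
}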
|
public void recover(ResultSubpartitionInfo subpartitionInfo, Tuple2<BufferBuilder, BufferConsumer> bufferBuilderAndConsumer) {
bufferBuilderAndConsumer.f0.finish();
if (bufferBuilderAndConsumer.f1.isDataAvailable()) {
boolean added = getSubpartition(subpartitionInfo).add(bufferBuilderAndConsumer.f1);
if (!added) {
throw new RuntimeException("Buffer consumer couldn't be added to ResultSubpartition");
}
} else {
bufferBuilderAndConsumer.f1.close();
}
}
|
throw new RuntimeException("Buffer consumer couldn't be added to ResultSubpartition");
|
public void recover(ResultSubpartitionInfo subpartitionInfo, Tuple2<BufferBuilder, BufferConsumer> bufferBuilderAndConsumer) throws IOException {
bufferBuilderAndConsumer.f0.finish();
if (bufferBuilderAndConsumer.f1.isDataAvailable()) {
boolean added = getSubpartition(subpartitionInfo).add(bufferBuilderAndConsumer.f1);
if (!added) {
throw new IOException("Buffer consumer couldn't be added to ResultSubpartition");
}
} else {
bufferBuilderAndConsumer.f1.close();
}
}
|
class ResultSubpartitionRecoveredStateHandler implements RecoveredChannelStateHandler<ResultSubpartitionInfo, Tuple2<BufferBuilder, BufferConsumer>> {
private final ResultPartitionWriter[] writers;
ResultSubpartitionRecoveredStateHandler(ResultPartitionWriter[] writers) {
this.writers = writers;
}
@Override
public BufferWithContext<Tuple2<BufferBuilder, BufferConsumer>> getBuffer(ResultSubpartitionInfo subpartitionInfo) throws IOException, InterruptedException {
BufferBuilder bufferBuilder = getSubpartition(subpartitionInfo).requestBufferBuilderBlocking();
return new BufferWithContext<>(wrap(bufferBuilder), Tuple2.of(bufferBuilder, bufferBuilder.createBufferConsumer()));
}
private CheckpointedResultSubpartition getSubpartition(ResultSubpartitionInfo subpartitionInfo) {
ResultPartitionWriter writer = writers[subpartitionInfo.getPartitionIdx()];
if (writer instanceof CheckpointedResultPartition) {
return ((CheckpointedResultPartition) writer).getCheckpointedSubpartition(subpartitionInfo.getSubPartitionIdx());
} else {
throw new IllegalStateException(
"Cannot restore state to a non-checkpointable partition type: " + writer);
}
}
@Override
public void close() {
}
}
|
class ResultSubpartitionRecoveredStateHandler implements RecoveredChannelStateHandler<ResultSubpartitionInfo, Tuple2<BufferBuilder, BufferConsumer>> {
private final ResultPartitionWriter[] writers;
ResultSubpartitionRecoveredStateHandler(ResultPartitionWriter[] writers) {
this.writers = writers;
}
@Override
public BufferWithContext<Tuple2<BufferBuilder, BufferConsumer>> getBuffer(ResultSubpartitionInfo subpartitionInfo) throws IOException, InterruptedException {
BufferBuilder bufferBuilder = getSubpartition(subpartitionInfo).requestBufferBuilderBlocking();
return new BufferWithContext<>(wrap(bufferBuilder), Tuple2.of(bufferBuilder, bufferBuilder.createBufferConsumer()));
}
private CheckpointedResultSubpartition getSubpartition(ResultSubpartitionInfo subpartitionInfo) {
ResultPartitionWriter writer = writers[subpartitionInfo.getPartitionIdx()];
if (writer instanceof CheckpointedResultPartition) {
return ((CheckpointedResultPartition) writer).getCheckpointedSubpartition(subpartitionInfo.getSubPartitionIdx());
} else {
throw new IllegalStateException(
"Cannot restore state to a non-checkpointable partition type: " + writer);
}
}
@Override
public void close() {
}
}
|
- These need to go in `ASTBuilderUtil`
- There are already two methods to create `BLangIndexBasedAccess` in `ASTBuilderUtil` which we can probably reuse here
- We probably don't need to set pos
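A rough sketch of what moving the type-node helper into `ASTBuilderUtil` might look like; the method name and the omission of `pos` are assumptions based on the suggestion, not the actual util API:

// Hypothetical addition to ASTBuilderUtil, mirroring getUserDefineTypeNode below:
// builds a user-defined type node for a given BType without setting pos.
static BLangUserDefinedType createUserDefinedTypeNode(BType type) {
    BLangUserDefinedType typeNode =
            new BLangUserDefinedType(createIdentifier(null, ""), createIdentifier(null, ""));
    typeNode.type = type;
    return typeNode;
}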
|
private BLangBlockStmt desugarForeachToWhile(BLangForeach foreach, BLangSimpleVariableDef varDef) {
BVarSymbol iteratorSymbol = varDef.var.symbol;
BVarSymbol resultSymbol = new BVarSymbol(0, names.fromString("$result$"), this.env.scope.owner.pkgID,
foreach.nillableResultType, this.env.scope.owner, foreach.pos,
VIRTUAL);
BLangSimpleVariableDef resultVariableDefinition = getIteratorNextVariableDefinition(foreach.pos,
foreach.nillableResultType, iteratorSymbol, resultSymbol);
BLangType userDefineType = getUserDefineTypeNode(foreach.resultType);
BLangSimpleVarRef resultReferenceInWhile = ASTBuilderUtil.createVariableRef(foreach.pos, resultSymbol);
BLangTypeTestExpr typeTestExpr = ASTBuilderUtil
.createTypeTestExpr(foreach.pos, resultReferenceInWhile, userDefineType);
BLangWhile whileNode = (BLangWhile) TreeBuilder.createWhileNode();
whileNode.pos = foreach.pos;
whileNode.expr = typeTestExpr;
whileNode.body = foreach.body;
BLangAssignment resultAssignment = getIteratorNextAssignment(foreach.pos, iteratorSymbol, resultSymbol);
VariableDefinitionNode variableDefinitionNode = foreach.variableDefinitionNode;
BLangFieldBasedAccess valueAccessExpr = getValueAccessExpression(foreach.pos, foreach.varType, resultSymbol);
valueAccessExpr.expr = addConversionExprIfRequired(valueAccessExpr.expr,
types.getSafeType(valueAccessExpr.expr.type, true, false));
variableDefinitionNode.getVariable()
.setInitialExpression(addConversionExprIfRequired(valueAccessExpr, foreach.varType));
whileNode.body.stmts.add(0, (BLangStatement) variableDefinitionNode);
whileNode.body.stmts.add(1, resultAssignment);
BLangBlockStmt blockNode = ASTBuilderUtil.createBlockStmt(foreach.pos);
blockNode.addStatement(varDef);
blockNode.addStatement(resultVariableDefinition);
blockNode.addStatement(whileNode);
return blockNode;
}
private BLangType getUserDefineTypeNode(BType type) {
BLangUserDefinedType recordType =
new BLangUserDefinedType(ASTBuilderUtil.createIdentifier(null, ""),
ASTBuilderUtil.createIdentifier(null, ""));
recordType.type = type;
return recordType;
}
@Override
public void visit(BLangWhile whileNode) {
if (whileNode.onFailClause != null) {
BLangOnFailClause onFailClause = whileNode.onFailClause;
whileNode.onFailClause = null;
whileNode.body.isBreakable = false;
BLangDo doStmt = wrapStatementWithinDo(whileNode.pos, whileNode, onFailClause);
result = rewrite(doStmt, env);
} else {
whileNode.expr = rewriteExpr(whileNode.expr);
whileNode.body = rewrite(whileNode.body, env);
result = whileNode;
}
}
private BLangDo wrapStatementWithinDo(Location location, BLangStatement statement,
BLangOnFailClause onFailClause) {
BLangDo bLDo = (BLangDo) TreeBuilder.createDoNode();
BLangBlockStmt doBlock = ASTBuilderUtil.createBlockStmt(location);
doBlock.scope = new Scope(env.scope.owner);
bLDo.body = doBlock;
bLDo.pos = location;
bLDo.onFailClause = onFailClause;
bLDo.body.isBreakable = true;
doBlock.stmts.add(statement);
return bLDo;
}
@Override
public void visit(BLangLock lockNode) {
BLangOnFailClause currentOnFailClause = this.onFailClause;
BLangSimpleVariableDef currentOnFailCallDef = this.onFailCallFuncDef;
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(lockNode.pos);
if (lockNode.onFailClause != null) {
blockStmt.isBreakable = true;
rewrite(lockNode.onFailClause, env);
}
BLangLockStmt lockStmt = new BLangLockStmt(lockNode.pos);
blockStmt.addStatement(lockStmt);
enclLocks.push(lockStmt);
BLangLiteral nilLiteral = ASTBuilderUtil.createLiteral(lockNode.pos, symTable.nilType, Names.NIL_VALUE);
BType nillableError = BUnionType.create(null, symTable.errorType, symTable.nilType);
BLangStatementExpression statementExpression = createStatementExpression(lockNode.body, nilLiteral);
statementExpression.type = symTable.nilType;
BLangTrapExpr trapExpr = (BLangTrapExpr) TreeBuilder.createTrapExpressionNode();
trapExpr.type = nillableError;
trapExpr.expr = statementExpression;
BVarSymbol nillableErrorVarSymbol = new BVarSymbol(0, names.fromString("$errorResult"),
this.env.scope.owner.pkgID, nillableError,
this.env.scope.owner, lockNode.pos, VIRTUAL);
BLangSimpleVariable simpleVariable = ASTBuilderUtil.createVariable(lockNode.pos, "$errorResult",
nillableError, trapExpr,
nillableErrorVarSymbol);
BLangSimpleVariableDef simpleVariableDef = ASTBuilderUtil.createVariableDef(lockNode.pos, simpleVariable);
blockStmt.addStatement(simpleVariableDef);
BLangUnLockStmt unLockStmt = new BLangUnLockStmt(lockNode.pos);
unLockStmt.relatedLock = lockStmt;
blockStmt.addStatement(unLockStmt);
BLangSimpleVarRef varRef = ASTBuilderUtil.createVariableRef(lockNode.pos, nillableErrorVarSymbol);
BLangBlockStmt ifBody = ASTBuilderUtil.createBlockStmt(lockNode.pos);
BLangPanic panicNode = (BLangPanic) TreeBuilder.createPanicNode();
panicNode.pos = lockNode.pos;
panicNode.expr = addConversionExprIfRequired(varRef, symTable.errorType);
ifBody.addStatement(panicNode);
BLangTypeTestExpr isErrorTest =
ASTBuilderUtil.createTypeTestExpr(lockNode.pos, varRef, getErrorTypeNode());
isErrorTest.type = symTable.booleanType;
BLangIf ifelse = ASTBuilderUtil.createIfElseStmt(lockNode.pos, isErrorTest, ifBody, null);
blockStmt.addStatement(ifelse);
result = rewrite(blockStmt, env);
enclLocks.pop();
this.onFailClause = currentOnFailClause;
this.onFailCallFuncDef = currentOnFailCallDef;
}
@Override
public void visit(BLangLockStmt lockStmt) {
result = lockStmt;
}
@Override
public void visit(BLangUnLockStmt unLockStmt) {
result = unLockStmt;
}
private BLangOnFailClause createTrxInternalOnFail(Location pos, BLangSimpleVarRef shouldPanicRef) {
BLangOnFailClause trxOnFailClause = (BLangOnFailClause) TreeBuilder.createOnFailClauseNode();
trxOnFailClause.pos = pos;
trxOnFailClause.body = ASTBuilderUtil.createBlockStmt(pos);
trxOnFailClause.body.scope = new Scope(env.scope.owner);
trxOnFailClause.isInternal = true;
BVarSymbol trxOnFailErrorSym = new BVarSymbol(0, names.fromString("$trxError$"),
env.scope.owner.pkgID, symTable.errorType, env.scope.owner, pos, VIRTUAL);
BLangSimpleVariable trxOnFailError = ASTBuilderUtil.createVariable(pos,
"$trxError$", symTable.errorType, null, trxOnFailErrorSym);
trxOnFailClause.variableDefinitionNode = ASTBuilderUtil.createVariableDef(pos,
trxOnFailError);
trxOnFailClause.body.scope.define(trxOnFailErrorSym.name, trxOnFailErrorSym);
transactionDesugar.createRollbackIfFailed(pos, trxOnFailClause.body, trxOnFailErrorSym, trxBlockId);
BLangGroupExpr shouldNotPanic = new BLangGroupExpr();
shouldNotPanic.type = symTable.booleanType;
shouldNotPanic.expression = createNotBinaryExpression(pos, shouldPanicRef);
BLangSimpleVarRef caughtError = ASTBuilderUtil.createVariableRef(pos, trxOnFailErrorSym);
BLangBlockStmt failBlock = ASTBuilderUtil.createBlockStmt(pos);
BLangPanic panicNode = (BLangPanic) TreeBuilder.createPanicNode();
panicNode.pos = pos;
panicNode.expr = caughtError;
BLangIf exitIf = ASTBuilderUtil.createIfElseStmt(pos, shouldNotPanic, failBlock, panicNode);
trxOnFailClause.body.stmts.add(exitIf);
BLangFail failStmt = (BLangFail) TreeBuilder.createFailNode();
failStmt.pos = pos;
failStmt.expr = caughtError;
failBlock.stmts.add(failStmt);
trxOnFailClause.bodyContainsFail = true;
return trxOnFailClause;
}
@Override
public void visit(BLangTransaction transactionNode) {
if (transactionNode.onFailClause != null) {
BLangOnFailClause onFailClause = transactionNode.onFailClause;
transactionNode.onFailClause = null;
transactionNode.transactionBody.isBreakable = false;
BLangDo doStmt = wrapStatementWithinDo(transactionNode.pos, transactionNode, onFailClause);
result = rewrite(doStmt, env);
} else {
BLangLiteral currentTrxBlockId = this.trxBlockId;
String uniqueId = String.valueOf(++transactionBlockCount);
this.trxBlockId = ASTBuilderUtil.createLiteral(transactionNode.pos, symTable.stringType, uniqueId);
boolean currShouldReturnErrors = this.shouldReturnErrors;
this.shouldReturnErrors = true;
BLangOnFailClause currOnFailClause = this.onFailClause;
BLangSimpleVariableDef currOnFailCallDef = this.onFailCallFuncDef;
BLangLiteral falseLiteral = ASTBuilderUtil.createLiteral(transactionNode.pos, symTable.booleanType, false);
BVarSymbol shouldPanicVarSymbol = new BVarSymbol(0, names.fromString("$shouldPanic$"),
env.scope.owner.pkgID, symTable.booleanType, this.env.scope.owner, transactionNode.pos, VIRTUAL);
shouldPanicVarSymbol.closure = true;
BLangSimpleVariable shouldPanicVariable = ASTBuilderUtil.createVariable(transactionNode.pos,
"$shouldPanic$", symTable.booleanType, falseLiteral, shouldPanicVarSymbol);
BLangSimpleVariableDef shouldPanicDef = ASTBuilderUtil.createVariableDef(transactionNode.pos,
shouldPanicVariable);
BLangSimpleVarRef shouldPanicRef = ASTBuilderUtil.createVariableRef(transactionNode.pos,
shouldPanicVarSymbol);
BLangOnFailClause trxInternalOnFail = createTrxInternalOnFail(transactionNode.pos, shouldPanicRef);
enclosingShouldPanic.put(trxInternalOnFail, shouldPanicRef);
boolean userDefinedOnFailAvbl = this.onFailClause != null;
analyzeOnFailClause(trxInternalOnFail, transactionNode.transactionBody);
BLangBlockStmt transactionStmtBlock =
transactionDesugar.rewrite(transactionNode, trxBlockId, env, uniqueId);
transactionStmtBlock.stmts.add(0, shouldPanicDef);
transactionStmtBlock.scope.define(shouldPanicVarSymbol.name, shouldPanicVarSymbol);
transactionStmtBlock.isBreakable = !userDefinedOnFailAvbl;
result = rewrite(transactionStmtBlock, this.env);
this.shouldReturnErrors = currShouldReturnErrors;
this.trxBlockId = currentTrxBlockId;
swapAndResetEnclosingOnFail(currOnFailClause, currOnFailCallDef);
}
}
@Override
public void visit(BLangRollback rollbackNode) {
BLangBlockStmt rollbackStmtExpr = transactionDesugar.desugar(rollbackNode, trxBlockId);
result = rewrite(rollbackStmtExpr, env);
}
private BLangOnFailClause createRetryInternalOnFail(Location pos,
BLangSimpleVarRef retryResultRef,
BLangSimpleVarRef retryManagerRef,
BLangSimpleVarRef shouldRetryRef,
BLangSimpleVarRef continueLoopRef,
BLangSimpleVarRef returnResult,
boolean shouldRollback) {
BLangOnFailClause internalOnFail = (BLangOnFailClause) TreeBuilder.createOnFailClauseNode();
internalOnFail.pos = pos;
internalOnFail.body = ASTBuilderUtil.createBlockStmt(pos);
internalOnFail.body.scope = new Scope(env.scope.owner);
BVarSymbol caughtErrorSym = new BVarSymbol(0, names.fromString("$caughtError$"),
env.scope.owner.pkgID, symTable.errorType, env.scope.owner, pos, VIRTUAL);
BLangSimpleVariable caughtError = ASTBuilderUtil.createVariable(pos,
"$caughtError$", symTable.errorType, null, caughtErrorSym);
internalOnFail.variableDefinitionNode = ASTBuilderUtil.createVariableDef(pos,
caughtError);
env.scope.define(caughtErrorSym.name, caughtErrorSym);
BLangSimpleVarRef caughtErrorRef = ASTBuilderUtil.createVariableRef(pos, caughtErrorSym);
BLangAssignment errorAssignment = ASTBuilderUtil.createAssignmentStmt(pos, retryResultRef, caughtErrorRef);
internalOnFail.body.stmts.add(errorAssignment);
BLangAssignment continueLoopTrue = ASTBuilderUtil.createAssignmentStmt(pos, continueLoopRef,
ASTBuilderUtil.createLiteral(pos, symTable.booleanType, true));
internalOnFail.body.stmts.add(continueLoopTrue);
if (shouldRollback) {
transactionDesugar.createRollbackIfFailed(pos, internalOnFail.body, caughtErrorSym, trxBlockId);
}
BLangInvocation shouldRetryInvocation = createRetryManagerShouldRetryInvocation(pos,
retryManagerRef, caughtErrorRef);
BLangAssignment shouldRetryAssignment = ASTBuilderUtil.createAssignmentStmt(pos, shouldRetryRef,
shouldRetryInvocation);
internalOnFail.body.stmts.add(shouldRetryAssignment);
BLangGroupExpr shouldNotRetryCheck = new BLangGroupExpr();
shouldNotRetryCheck.type = symTable.booleanType;
shouldNotRetryCheck.expression = createNotBinaryExpression(pos, shouldRetryRef);
BLangGroupExpr exitCheck = new BLangGroupExpr();
exitCheck.type = symTable.booleanType;
exitCheck.expression = shouldNotRetryCheck;
BLangBlockStmt exitLogicBlock = ASTBuilderUtil.createBlockStmt(pos);
BLangIf exitIf = ASTBuilderUtil.createIfElseStmt(pos, exitCheck, exitLogicBlock, null);
if (this.onFailClause != null) {
BLangFail failStmt = (BLangFail) TreeBuilder.createFailNode();
failStmt.pos = pos;
failStmt.expr = retryResultRef;
exitLogicBlock.stmts.add(failStmt);
internalOnFail.bodyContainsFail = true;
internalOnFail.body.stmts.add(exitIf);
BLangContinue loopContinueStmt = (BLangContinue) TreeBuilder.createContinueNode();
loopContinueStmt.pos = pos;
internalOnFail.body.stmts.add(loopContinueStmt);
} else {
BLangAssignment returnErrorTrue = ASTBuilderUtil.createAssignmentStmt(pos, returnResult,
ASTBuilderUtil.createLiteral(pos, symTable.booleanType, true));
exitLogicBlock.stmts.add(returnErrorTrue);
internalOnFail.body.stmts.add(exitIf);
}
return internalOnFail;
}
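// Builds a '!' (logical NOT) unary expression over the given boolean expression, resolving the
// built-in NOT operator symbol. Note: despite the name, the result is a unary expression.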
BLangUnaryExpr createNotBinaryExpression(Location pos, BLangExpression expression) {
List<BType> paramTypes = new ArrayList<>();
paramTypes.add(symTable.booleanType);
BInvokableType type = new BInvokableType(paramTypes, symTable.booleanType,
null);
BOperatorSymbol notOperatorSymbol = new BOperatorSymbol(
names.fromString(OperatorKind.NOT.value()), symTable.rootPkgSymbol.pkgID, type, symTable.rootPkgSymbol,
symTable.builtinPos, VIRTUAL);
return ASTBuilderUtil.createUnaryExpr(pos, expression, symTable.booleanType,
OperatorKind.NOT, notOperatorSymbol);
}
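// Creates a lambda function node around a new function with the given parameters, return type and
// body, and defines that function at the top level of the enclosing package.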
BLangLambdaFunction createLambdaFunction(Location pos, String functionNamePrefix,
List<BLangSimpleVariable> lambdaFunctionVariable,
TypeNode returnType, BLangFunctionBody lambdaBody) {
BLangLambdaFunction lambdaFunction = (BLangLambdaFunction) TreeBuilder.createLambdaFunctionNode();
BLangFunction func =
ASTBuilderUtil.createFunction(pos, functionNamePrefix + UNDERSCORE + lambdaFunctionCount++);
lambdaFunction.function = func;
func.requiredParams.addAll(lambdaFunctionVariable);
func.setReturnTypeNode(returnType);
func.desugaredReturnType = true;
defineFunction(func, env.enclPkg);
lambdaFunctionVariable = func.requiredParams;
func.body = lambdaBody;
func.desugared = false;
lambdaFunction.pos = pos;
List<BType> paramTypes = new ArrayList<>();
lambdaFunctionVariable.forEach(variable -> paramTypes.add(variable.symbol.type));
lambdaFunction.type = new BInvokableType(paramTypes, func.symbol.type.getReturnType(),
null);
return lambdaFunction;
}
protected BLangLambdaFunction createLambdaFunction(Location pos, String functionNamePrefix,
List<BLangSimpleVariable> lambdaFunctionVariable,
TypeNode returnType, List<BLangStatement> fnBodyStmts,
SymbolEnv env, Scope bodyScope) {
BLangBlockFunctionBody body = (BLangBlockFunctionBody) TreeBuilder.createBlockFunctionBodyNode();
body.scope = bodyScope;
SymbolEnv bodyEnv = SymbolEnv.createFuncBodyEnv(body, env);
this.forceCastReturnType = ((BLangType) returnType).type;
body.stmts = rewriteStmt(fnBodyStmts, bodyEnv);
this.forceCastReturnType = null;
return createLambdaFunction(pos, functionNamePrefix, lambdaFunctionVariable, returnType, body);
}
private BLangLambdaFunction createLambdaFunction(Location pos, String functionNamePrefix,
TypeNode returnType) {
BLangLambdaFunction lambdaFunction = (BLangLambdaFunction) TreeBuilder.createLambdaFunctionNode();
BLangFunction func =
ASTBuilderUtil.createFunction(pos, functionNamePrefix + UNDERSCORE + lambdaFunctionCount++);
lambdaFunction.function = func;
func.setReturnTypeNode(returnType);
func.desugaredReturnType = true;
defineFunction(func, env.enclPkg);
func.desugared = false;
lambdaFunction.pos = pos;
return lambdaFunction;
}
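// Defines the given function in the target package's symbol scope and registers it both in the
// package's function list and as a top-level node.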
private void defineFunction(BLangFunction funcNode, BLangPackage targetPkg) {
final BPackageSymbol packageSymbol = targetPkg.symbol;
final SymbolEnv packageEnv = this.symTable.pkgEnvMap.get(packageSymbol);
symbolEnter.defineNode(funcNode, packageEnv);
packageEnv.enclPkg.functions.add(funcNode);
packageEnv.enclPkg.topLevelNodes.add(funcNode);
}
@Override
public void visit(BLangForkJoin forkJoin) {
result = forkJoin;
}
@Override
public void visit(BLangLiteral literalExpr) {
if (literalExpr.type.tag == TypeTags.ARRAY && ((BArrayType) literalExpr.type).eType.tag == TypeTags.BYTE) {
result = rewriteBlobLiteral(literalExpr);
return;
}
result = literalExpr;
}
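/**
 * Desugars a byte array literal into an array literal of byte values, e.g. (illustrative):
 * byte[] b = base64 `SGVsbG8=`;  // becomes byte[] b = [72, 101, 108, 108, 111];
 */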
private BLangNode rewriteBlobLiteral(BLangLiteral literalExpr) {
String[] result = getBlobTextValue((String) literalExpr.value);
byte[] values;
if (BASE_64.equals(result[0])) {
values = Base64.getDecoder().decode(result[1].getBytes(StandardCharsets.UTF_8));
} else {
values = hexStringToByteArray(result[1]);
}
BLangArrayLiteral arrayLiteralNode = (BLangArrayLiteral) TreeBuilder.createArrayLiteralExpressionNode();
arrayLiteralNode.type = literalExpr.type;
arrayLiteralNode.pos = literalExpr.pos;
arrayLiteralNode.exprs = new ArrayList<>();
for (byte b : values) {
arrayLiteralNode.exprs.add(createByteLiteral(literalExpr.pos, b));
}
return arrayLiteralNode;
}
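// Splits blob literal text of the form 'base64 `...`' or 'base16 `...`' into the format prefix
// (index 0) and the backtick-enclosed content (index 1), with spaces removed.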
private String[] getBlobTextValue(String blobLiteralNodeText) {
String nodeText = blobLiteralNodeText.replaceAll(" ", "");
String[] result = new String[2];
result[0] = nodeText.substring(0, nodeText.indexOf('`'));
result[1] = nodeText.substring(nodeText.indexOf('`') + 1, nodeText.lastIndexOf('`'));
return result;
}
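// Converts a hex string (e.g. "48656C6C6F") into a byte array by combining each pair of hex digits.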
private static byte[] hexStringToByteArray(String str) {
int len = str.length();
byte[] data = new byte[len / 2];
for (int i = 0; i < len; i += 2) {
data[i / 2] = (byte) ((Character.digit(str.charAt(i), 16) << 4) + Character.digit(str.charAt(i + 1), 16));
}
return data;
}
@Override
public void visit(BLangListConstructorExpr listConstructor) {
listConstructor.exprs = rewriteExprs(listConstructor.exprs);
BLangExpression expr;
if (listConstructor.type.tag == TypeTags.TUPLE) {
expr = new BLangTupleLiteral(listConstructor.pos, listConstructor.exprs, listConstructor.type);
result = rewriteExpr(expr);
} else if (listConstructor.type.tag == TypeTags.JSON) {
expr = new BLangJSONArrayLiteral(listConstructor.exprs, new BArrayType(listConstructor.type));
result = rewriteExpr(expr);
} else if (getElementType(listConstructor.type).tag == TypeTags.JSON) {
expr = new BLangJSONArrayLiteral(listConstructor.exprs, listConstructor.type);
result = rewriteExpr(expr);
} else if (listConstructor.type.tag == TypeTags.TYPEDESC) {
final BLangTypedescExpr typedescExpr = new BLangTypedescExpr();
typedescExpr.resolvedType = listConstructor.typedescType;
typedescExpr.type = symTable.typeDesc;
result = rewriteExpr(typedescExpr);
} else {
expr = new BLangArrayLiteral(listConstructor.pos, listConstructor.exprs, listConstructor.type);
result = rewriteExpr(expr);
}
}
@Override
public void visit(BLangTableConstructorExpr tableConstructorExpr) {
rewriteExprs(tableConstructorExpr.recordLiteralList);
result = tableConstructorExpr;
}
@Override
public void visit(BLangArrayLiteral arrayLiteral) {
arrayLiteral.exprs = rewriteExprs(arrayLiteral.exprs);
if (arrayLiteral.type.tag == TypeTags.JSON) {
result = new BLangJSONArrayLiteral(arrayLiteral.exprs, new BArrayType(arrayLiteral.type));
return;
} else if (getElementType(arrayLiteral.type).tag == TypeTags.JSON) {
result = new BLangJSONArrayLiteral(arrayLiteral.exprs, arrayLiteral.type);
return;
}
result = arrayLiteral;
}
@Override
public void visit(BLangTupleLiteral tupleLiteral) {
if (tupleLiteral.isTypedescExpr) {
final BLangTypedescExpr typedescExpr = new BLangTypedescExpr();
typedescExpr.resolvedType = tupleLiteral.typedescType;
typedescExpr.type = symTable.typeDesc;
result = rewriteExpr(typedescExpr);
return;
}
tupleLiteral.exprs.forEach(expr -> {
BType expType = expr.impConversionExpr == null ? expr.type : expr.impConversionExpr.type;
types.setImplicitCastExpr(expr, expType, symTable.anyType);
});
tupleLiteral.exprs = rewriteExprs(tupleLiteral.exprs);
result = tupleLiteral;
}
@Override
public void visit(BLangGroupExpr groupExpr) {
if (groupExpr.isTypedescExpr) {
final BLangTypedescExpr typedescExpr = new BLangTypedescExpr();
typedescExpr.resolvedType = groupExpr.typedescType;
typedescExpr.type = symTable.typeDesc;
result = rewriteExpr(typedescExpr);
} else {
result = rewriteExpr(groupExpr.expression);
}
}
@Override
public void visit(BLangRecordLiteral recordLiteral) {
List<RecordLiteralNode.RecordField> fields = recordLiteral.fields;
fields.sort((v1, v2) -> Boolean.compare(isComputedKey(v1), isComputedKey(v2)));
result = rewriteExpr(rewriteMappingConstructor(recordLiteral));
}
@Override
public void visit(BLangSimpleVarRef varRefExpr) {
BLangSimpleVarRef genVarRefExpr = varRefExpr;
if (varRefExpr.pkgSymbol != null && varRefExpr.pkgSymbol.tag == SymTag.XMLNS) {
BLangXMLQName qnameExpr = new BLangXMLQName(varRefExpr.variableName);
qnameExpr.nsSymbol = (BXMLNSSymbol) varRefExpr.pkgSymbol;
qnameExpr.localname = varRefExpr.variableName;
qnameExpr.prefix = varRefExpr.pkgAlias;
qnameExpr.namespaceURI = qnameExpr.nsSymbol.namespaceURI;
qnameExpr.isUsedInXML = false;
qnameExpr.pos = varRefExpr.pos;
qnameExpr.type = symTable.stringType;
result = qnameExpr;
return;
}
if (varRefExpr.symbol == null) {
result = varRefExpr;
return;
}
if ((varRefExpr.symbol.tag & SymTag.VARIABLE) == SymTag.VARIABLE) {
BVarSymbol varSymbol = (BVarSymbol) varRefExpr.symbol;
if (varSymbol.originalSymbol != null) {
varRefExpr.symbol = varSymbol.originalSymbol;
}
}
BSymbol ownerSymbol = varRefExpr.symbol.owner;
if ((varRefExpr.symbol.tag & SymTag.FUNCTION) == SymTag.FUNCTION &&
varRefExpr.symbol.type.tag == TypeTags.INVOKABLE) {
genVarRefExpr = new BLangFunctionVarRef((BVarSymbol) varRefExpr.symbol);
} else if ((varRefExpr.symbol.tag & SymTag.TYPE) == SymTag.TYPE &&
!((varRefExpr.symbol.tag & SymTag.CONSTANT) == SymTag.CONSTANT)) {
genVarRefExpr = new BLangTypeLoad(varRefExpr.symbol);
} else if ((ownerSymbol.tag & SymTag.INVOKABLE) == SymTag.INVOKABLE ||
(ownerSymbol.tag & SymTag.LET) == SymTag.LET) {
genVarRefExpr = new BLangLocalVarRef((BVarSymbol) varRefExpr.symbol);
} else if ((ownerSymbol.tag & SymTag.STRUCT) == SymTag.STRUCT) {
genVarRefExpr = new BLangFieldVarRef((BVarSymbol) varRefExpr.symbol);
} else if ((ownerSymbol.tag & SymTag.PACKAGE) == SymTag.PACKAGE ||
(ownerSymbol.tag & SymTag.SERVICE) == SymTag.SERVICE) {
if ((varRefExpr.symbol.tag & SymTag.CONSTANT) == SymTag.CONSTANT) {
BConstantSymbol constSymbol = (BConstantSymbol) varRefExpr.symbol;
if (constSymbol.literalType.tag <= TypeTags.BOOLEAN || constSymbol.literalType.tag == TypeTags.NIL) {
BLangLiteral literal = ASTBuilderUtil.createLiteral(varRefExpr.pos, constSymbol.literalType,
constSymbol.value.value);
result = rewriteExpr(addConversionExprIfRequired(literal, varRefExpr.type));
return;
}
}
genVarRefExpr = new BLangPackageVarRef((BVarSymbol) varRefExpr.symbol);
if (!enclLocks.isEmpty()) {
BVarSymbol symbol = (BVarSymbol) varRefExpr.symbol;
BLangLockStmt lockStmt = enclLocks.peek();
lockStmt.addLockVariable(symbol);
lockStmt.addLockVariable(this.globalVariablesDependsOn.getOrDefault(symbol, new HashSet<>()));
}
}
genVarRefExpr.type = varRefExpr.type;
genVarRefExpr.pos = varRefExpr.pos;
if ((varRefExpr.lhsVar)
|| genVarRefExpr.symbol.name.equals(IGNORE)) {
genVarRefExpr.lhsVar = varRefExpr.lhsVar;
genVarRefExpr.type = varRefExpr.symbol.type;
result = genVarRefExpr;
return;
}
genVarRefExpr.lhsVar = varRefExpr.lhsVar;
BType targetType = genVarRefExpr.type;
genVarRefExpr.type = genVarRefExpr.symbol.type;
BLangExpression expression = addConversionExprIfRequired(genVarRefExpr, targetType);
result = expression.impConversionExpr != null ? expression.impConversionExpr : expression;
}
@Override
public void visit(BLangFieldBasedAccess fieldAccessExpr) {
if (safeNavigate(fieldAccessExpr)) {
result = rewriteExpr(rewriteSafeNavigationExpr(fieldAccessExpr));
return;
}
BLangAccessExpression targetVarRef = fieldAccessExpr;
BType varRefType = fieldAccessExpr.expr.type;
fieldAccessExpr.expr = rewriteExpr(fieldAccessExpr.expr);
if (!types.isSameType(fieldAccessExpr.expr.type, varRefType)) {
fieldAccessExpr.expr = addConversionExprIfRequired(fieldAccessExpr.expr, varRefType);
}
BLangLiteral stringLit = createStringLiteral(fieldAccessExpr.field.pos,
StringEscapeUtils.unescapeJava(fieldAccessExpr.field.value));
int varRefTypeTag = varRefType.tag;
if (varRefTypeTag == TypeTags.OBJECT ||
(varRefTypeTag == TypeTags.UNION &&
((BUnionType) varRefType).getMemberTypes().iterator().next().tag == TypeTags.OBJECT)) {
if (fieldAccessExpr.symbol != null && fieldAccessExpr.symbol.type.tag == TypeTags.INVOKABLE &&
((fieldAccessExpr.symbol.flags & Flags.ATTACHED) == Flags.ATTACHED)) {
result = rewriteObjectMemberAccessAsField(fieldAccessExpr);
return;
} else {
boolean isStoreOnCreation = fieldAccessExpr.isStoreOnCreation;
if (!isStoreOnCreation && varRefTypeTag == TypeTags.OBJECT && env.enclInvokable != null) {
BInvokableSymbol originalFuncSymbol = ((BLangFunction) env.enclInvokable).originalFuncSymbol;
BObjectTypeSymbol objectTypeSymbol = (BObjectTypeSymbol) varRefType.tsymbol;
BAttachedFunction initializerFunc = objectTypeSymbol.initializerFunc;
BAttachedFunction generatedInitializerFunc = objectTypeSymbol.generatedInitializerFunc;
if ((generatedInitializerFunc != null && originalFuncSymbol == generatedInitializerFunc.symbol) ||
(initializerFunc != null && originalFuncSymbol == initializerFunc.symbol)) {
isStoreOnCreation = true;
}
}
targetVarRef = new BLangStructFieldAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit,
(BVarSymbol) fieldAccessExpr.symbol, false,
isStoreOnCreation);
}
} else if (varRefTypeTag == TypeTags.RECORD ||
(varRefTypeTag == TypeTags.UNION &&
((BUnionType) varRefType).getMemberTypes().iterator().next().tag == TypeTags.RECORD)) {
if (fieldAccessExpr.symbol != null && fieldAccessExpr.symbol.type.tag == TypeTags.INVOKABLE
&& ((fieldAccessExpr.symbol.flags & Flags.ATTACHED) == Flags.ATTACHED)) {
targetVarRef = new BLangStructFunctionVarRef(fieldAccessExpr.expr, (BVarSymbol) fieldAccessExpr.symbol);
} else {
targetVarRef = new BLangStructFieldAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit,
(BVarSymbol) fieldAccessExpr.symbol, false, fieldAccessExpr.isStoreOnCreation);
}
} else if (types.isLax(varRefType)) {
if (!(varRefType.tag == TypeTags.XML || varRefType.tag == TypeTags.XML_ELEMENT)) {
if (varRefType.tag == TypeTags.MAP && TypeTags.isXMLTypeTag(((BMapType) varRefType).constraint.tag)) {
result = rewriteExpr(rewriteLaxMapAccess(fieldAccessExpr));
return;
}
fieldAccessExpr.expr = addConversionExprIfRequired(fieldAccessExpr.expr, symTable.jsonType);
targetVarRef = new BLangJSONAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit);
} else {
targetVarRef = rewriteXMLAttributeOrElemNameAccess(fieldAccessExpr);
}
} else if (varRefTypeTag == TypeTags.MAP) {
targetVarRef = new BLangMapAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit,
fieldAccessExpr.isStoreOnCreation);
} else if (TypeTags.isXMLTypeTag(varRefTypeTag)) {
targetVarRef = new BLangXMLAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit,
fieldAccessExpr.fieldKind);
}
targetVarRef.lhsVar = fieldAccessExpr.lhsVar;
targetVarRef.type = fieldAccessExpr.type;
targetVarRef.optionalFieldAccess = fieldAccessExpr.optionalFieldAccess;
result = targetVarRef;
}
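/**
 * Rewrites an object method accessed as a field (e.g. 'var f = obj.method;') into a lambda that
 * closes over the receiver and delegates to the method, roughly (illustrative sketch):
 *
 * var f = function (T1 p1, ...) returns R {
 *     return obj.method(p1, ...);
 * };
 *
 * When the receiver is not a simple variable reference, it is first captured in a temporary
 * variable so that it is evaluated exactly once.
 */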
private BLangNode rewriteObjectMemberAccessAsField(BLangFieldBasedAccess fieldAccessExpr) {
Location pos = fieldAccessExpr.pos;
BInvokableSymbol originalMemberFuncSymbol = (BInvokableSymbol) fieldAccessExpr.symbol;
BLangFunction func = (BLangFunction) TreeBuilder.createFunctionNode();
String funcName = "$annon$method$delegate$" + lambdaFunctionCount++;
BInvokableSymbol funcSymbol = new BInvokableSymbol(SymTag.INVOKABLE, (Flags.ANONYMOUS | Flags.LAMBDA),
names.fromString(funcName),
env.enclPkg.packageID, originalMemberFuncSymbol.type, env.scope.owner, pos, VIRTUAL);
funcSymbol.retType = originalMemberFuncSymbol.retType;
funcSymbol.bodyExist = true;
funcSymbol.params = new ArrayList<>();
funcSymbol.scope = new Scope(funcSymbol);
func.pos = pos;
func.name = createIdentifier(pos, funcName);
func.flagSet.add(Flag.LAMBDA);
func.flagSet.add(Flag.ANONYMOUS);
func.body = (BLangBlockFunctionBody) TreeBuilder.createBlockFunctionBodyNode();
func.symbol = funcSymbol;
func.type = funcSymbol.type;
func.closureVarSymbols = new LinkedHashSet<>();
BLangExpression receiver = fieldAccessExpr.expr;
BLangSimpleVariableDef intermediateObjDef = null;
if (receiver.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
BSymbol receiverSymbol = ((BLangVariableReference) receiver).symbol;
receiverSymbol.closure = true;
func.closureVarSymbols.add(new ClosureVarSymbol(receiverSymbol, pos));
} else {
BLangSimpleVariableDef varDef = createVarDef("$$temp$obj$" + annonVarCount++, receiver.type, receiver, pos);
intermediateObjDef = varDef;
varDef.var.symbol.closure = true;
env.scope.define(varDef.var.symbol.name, varDef.var.symbol);
BLangSimpleVarRef variableRef = createVariableRef(pos, varDef.var.symbol);
func.closureVarSymbols.add(new ClosureVarSymbol(varDef.var.symbol, pos));
receiver = variableRef;
}
ArrayList<BLangExpression> requiredArgs = new ArrayList<>();
for (BVarSymbol param : originalMemberFuncSymbol.params) {
BLangSimpleVariable fParam = (BLangSimpleVariable) TreeBuilder.createSimpleVariableNode();
fParam.symbol = new BVarSymbol(0, param.name, env.enclPkg.packageID, param.type, funcSymbol, pos,
VIRTUAL);
fParam.pos = pos;
fParam.name = createIdentifier(pos, param.name.value);
fParam.type = param.type;
func.requiredParams.add(fParam);
funcSymbol.params.add(fParam.symbol);
funcSymbol.scope.define(fParam.symbol.name, fParam.symbol);
BLangSimpleVarRef paramRef = createVariableRef(pos, fParam.symbol);
requiredArgs.add(paramRef);
}
ArrayList<BLangExpression> restArgs = new ArrayList<>();
if (originalMemberFuncSymbol.restParam != null) {
BLangSimpleVariable restParam = (BLangSimpleVariable) TreeBuilder.createSimpleVariableNode();
func.restParam = restParam;
BVarSymbol restSym = originalMemberFuncSymbol.restParam;
restParam.name = ASTBuilderUtil.createIdentifier(pos, restSym.name.value);
restParam.symbol = new BVarSymbol(0, restSym.name, env.enclPkg.packageID, restSym.type, funcSymbol, pos,
VIRTUAL);
restParam.pos = pos;
restParam.type = restSym.type;
funcSymbol.restParam = restParam.symbol;
funcSymbol.scope.define(restParam.symbol.name, restParam.symbol);
BLangSimpleVarRef restArg = createVariableRef(pos, restParam.symbol);
BLangRestArgsExpression restArgExpr = new BLangRestArgsExpression();
restArgExpr.expr = restArg;
restArgExpr.pos = pos;
restArgExpr.type = restSym.type;
restArgExpr.expectedType = restArgExpr.type;
restArgs.add(restArgExpr);
}
BLangIdentifier field = fieldAccessExpr.field;
BLangReturn retStmt = (BLangReturn) TreeBuilder.createReturnNode();
retStmt.expr = createObjectMethodInvocation(
receiver, field, fieldAccessExpr.symbol, requiredArgs, restArgs);
((BLangBlockFunctionBody) func.body).addStatement(retStmt);
BLangLambdaFunction lambdaFunction = (BLangLambdaFunction) TreeBuilder.createLambdaFunctionNode();
lambdaFunction.function = func;
lambdaFunction.capturedClosureEnv = env.createClone();
env.enclPkg.functions.add(func);
env.enclPkg.topLevelNodes.add(func);
lambdaFunction.parent = env.enclInvokable;
lambdaFunction.type = func.type;
if (intermediateObjDef == null) {
return rewrite(lambdaFunction, env);
} else {
BLangStatementExpression expr = createStatementExpression(intermediateObjDef, rewrite(lambdaFunction, env));
expr.type = lambdaFunction.type;
return rewrite(expr, env);
}
}
private BLangInvocation createObjectMethodInvocation(BLangExpression receiver, BLangIdentifier field,
BSymbol invocableSymbol,
List<BLangExpression> requiredArgs,
List<BLangExpression> restArgs) {
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
invocationNode.name = field;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.expr = receiver;
invocationNode.symbol = invocableSymbol;
invocationNode.type = ((BInvokableType) invocableSymbol.type).retType;
invocationNode.requiredArgs = requiredArgs;
invocationNode.restArgs = restArgs;
return invocationNode;
}
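/**
 * Rewrites a lax field access on a map (e.g. map<xml>) into a statement expression that yields
 * either the member value or an InvalidKey error when the key is absent, roughly (illustrative):
 *
 * (xml|error) $mapAccessResult$;
 * xml? $mapAccess$ = m["k"];
 * if ($mapAccess$ is ()) {
 *     $mapAccessResult$ = error("{<map-lang-lib>}InvalidKey", key = "k");
 * } else {
 *     $mapAccessResult$ = $mapAccess$;
 * }
 */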
private BLangStatementExpression rewriteLaxMapAccess(BLangFieldBasedAccess fieldAccessExpr) {
BLangStatementExpression statementExpression = new BLangStatementExpression();
BLangBlockStmt block = new BLangBlockStmt();
statementExpression.stmt = block;
BUnionType fieldAccessType = BUnionType.create(null, fieldAccessExpr.type, symTable.errorType);
Location pos = fieldAccessExpr.pos;
BLangSimpleVariableDef result = createVarDef("$mapAccessResult$", fieldAccessType, null, pos);
block.addStatement(result);
BLangSimpleVarRef resultRef = ASTBuilderUtil.createVariableRef(pos, result.var.symbol);
resultRef.type = fieldAccessType;
statementExpression.type = fieldAccessType;
BLangLiteral mapIndex = ASTBuilderUtil.createLiteral(
fieldAccessExpr.field.pos, symTable.stringType, fieldAccessExpr.field.value);
BLangMapAccessExpr mapAccessExpr = new BLangMapAccessExpr(pos, fieldAccessExpr.expr, mapIndex);
BUnionType xmlOrNil = BUnionType.create(null, fieldAccessExpr.type, symTable.nilType);
mapAccessExpr.type = xmlOrNil;
BLangSimpleVariableDef mapResult = createVarDef("$mapAccess", xmlOrNil, mapAccessExpr, pos);
BLangSimpleVarRef mapResultRef = ASTBuilderUtil.createVariableRef(pos, mapResult.var.symbol);
block.addStatement(mapResult);
BLangIf ifStmt = ASTBuilderUtil.createIfStmt(pos, block);
BLangIsLikeExpr isLikeNilExpr = createIsLikeExpression(pos, mapResultRef, symTable.nilType);
ifStmt.expr = isLikeNilExpr;
BLangBlockStmt resultNilBody = new BLangBlockStmt();
ifStmt.body = resultNilBody;
BLangBlockStmt resultHasValueBody = new BLangBlockStmt();
ifStmt.elseStmt = resultHasValueBody;
BLangErrorConstructorExpr errorConstructorExpr =
(BLangErrorConstructorExpr) TreeBuilder.createErrorConstructorExpressionNode();
BSymbol symbol = symResolver.lookupMainSpaceSymbolInPackage(errorConstructorExpr.pos, env,
names.fromString(""), names.fromString("error"));
errorConstructorExpr.type = symbol.type;
List<BLangExpression> positionalArgs = new ArrayList<>();
List<BLangNamedArgsExpression> namedArgs = new ArrayList<>();
positionalArgs.add(createStringLiteral(pos, "{" + RuntimeConstants.MAP_LANG_LIB + "}InvalidKey"));
BLangNamedArgsExpression keyArg = new BLangNamedArgsExpression();
keyArg.name = ASTBuilderUtil.createIdentifier(pos, "key");
keyArg.expr = createStringLiteral(pos, fieldAccessExpr.field.value);
namedArgs.add(keyArg);
errorConstructorExpr.positionalArgs = positionalArgs;
errorConstructorExpr.namedArgs = namedArgs;
BLangSimpleVariableDef errorDef =
createVarDef("$_invalid_key_error", symTable.errorType, errorConstructorExpr, pos);
resultNilBody.addStatement(errorDef);
BLangSimpleVarRef errorRef = ASTBuilderUtil.createVariableRef(pos, errorDef.var.symbol);
BLangAssignment errorVarAssignment = ASTBuilderUtil.createAssignmentStmt(pos, resultNilBody);
errorVarAssignment.varRef = resultRef;
errorVarAssignment.expr = errorRef;
BLangAssignment mapResultAssignment = ASTBuilderUtil.createAssignmentStmt(
pos, resultHasValueBody);
mapResultAssignment.varRef = resultRef;
mapResultAssignment.expr = mapResultRef;
statementExpression.expr = resultRef;
return statementExpression;
}
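// Rewrites field access on xml values: 'x._' is turned into a langlib call that reads the element
// name, and 'x.attr' (optionally namespace-qualified) into a langlib attribute lookup that takes
// the (possibly expanded) attribute name and an optional-access flag.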
private BLangAccessExpression rewriteXMLAttributeOrElemNameAccess(BLangFieldBasedAccess fieldAccessExpr) {
ArrayList<BLangExpression> args = new ArrayList<>();
String fieldName = fieldAccessExpr.field.value;
if (fieldAccessExpr.fieldKind == FieldKind.WITH_NS) {
BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess nsPrefixAccess =
(BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess) fieldAccessExpr;
fieldName = createExpandedQName(nsPrefixAccess.nsSymbol.namespaceURI, fieldName);
}
if (fieldName.equals("_")) {
return createLanglibXMLInvocation(fieldAccessExpr.pos, XML_INTERNAL_GET_ELEMENT_NAME_NIL_LIFTING,
fieldAccessExpr.expr, new ArrayList<>(), new ArrayList<>());
}
BLangLiteral attributeNameLiteral = createStringLiteral(fieldAccessExpr.field.pos, fieldName);
args.add(attributeNameLiteral);
args.add(isOptionalAccessToLiteral(fieldAccessExpr));
return createLanglibXMLInvocation(fieldAccessExpr.pos, XML_INTERNAL_GET_ATTRIBUTE, fieldAccessExpr.expr, args,
new ArrayList<>());
}
private BLangExpression isOptionalAccessToLiteral(BLangFieldBasedAccess fieldAccessExpr) {
return rewrite(
createLiteral(fieldAccessExpr.pos, symTable.booleanType, fieldAccessExpr.isOptionalFieldAccess()), env);
}
private String createExpandedQName(String nsURI, String localName) {
return "{" + nsURI + "}" + localName;
}
@Override
public void visit(BLangIndexBasedAccess indexAccessExpr) {
if (safeNavigate(indexAccessExpr)) {
result = rewriteExpr(rewriteSafeNavigationExpr(indexAccessExpr));
return;
}
BLangIndexBasedAccess targetVarRef = indexAccessExpr;
indexAccessExpr.indexExpr = rewriteExpr(indexAccessExpr.indexExpr);
BType varRefType = indexAccessExpr.expr.type;
indexAccessExpr.expr = rewriteExpr(indexAccessExpr.expr);
if (!types.isSameType(indexAccessExpr.expr.type, varRefType)) {
indexAccessExpr.expr = addConversionExprIfRequired(indexAccessExpr.expr, varRefType);
}
if (varRefType.tag == TypeTags.MAP) {
targetVarRef = new BLangMapAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr, indexAccessExpr.isStoreOnCreation);
} else if (types.isSubTypeOfMapping(types.getSafeType(varRefType, true, false))) {
targetVarRef = new BLangStructFieldAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr,
(BVarSymbol) indexAccessExpr.symbol, false);
} else if (types.isSubTypeOfList(varRefType)) {
targetVarRef = new BLangArrayAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr);
} else if (types.isAssignable(varRefType, symTable.stringType)) {
indexAccessExpr.expr = addConversionExprIfRequired(indexAccessExpr.expr, symTable.stringType);
targetVarRef = new BLangStringAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr);
} else if (TypeTags.isXMLTypeTag(varRefType.tag)) {
targetVarRef = new BLangXMLAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr);
} else if (varRefType.tag == TypeTags.TABLE) {
if (targetVarRef.indexExpr.getKind() == NodeKind.TABLE_MULTI_KEY) {
BLangTupleLiteral listConstructorExpr = new BLangTupleLiteral();
listConstructorExpr.exprs = ((BLangTableMultiKeyExpr) indexAccessExpr.indexExpr).multiKeyIndexExprs;
List<BType> memberTypes = new ArrayList<>();
((BLangTableMultiKeyExpr) indexAccessExpr.indexExpr).multiKeyIndexExprs.
forEach(expression -> memberTypes.add(expression.type));
listConstructorExpr.type = new BTupleType(memberTypes);
indexAccessExpr.indexExpr = listConstructorExpr;
}
targetVarRef = new BLangTableAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr);
}
targetVarRef.lhsVar = indexAccessExpr.lhsVar;
targetVarRef.type = indexAccessExpr.type;
result = targetVarRef;
}
@Override
public void visit(BLangTableMultiKeyExpr tableMultiKeyExpr) {
rewriteExprs(tableMultiKeyExpr.multiKeyIndexExprs);
result = tableMultiKeyExpr;
}
@Override
public void visit(BLangInvocation iExpr) {
rewriteInvocation(iExpr, false);
}
@Override
public void visit(BLangErrorConstructorExpr errorConstructorExpr) {
if (errorConstructorExpr.positionalArgs.size() == 1) {
errorConstructorExpr.positionalArgs.add(createNilLiteral());
}
errorConstructorExpr.positionalArgs.set(1,
addConversionExprIfRequired(errorConstructorExpr.positionalArgs.get(1), symTable.errorType));
rewriteExprs(errorConstructorExpr.positionalArgs);
BLangExpression errorDetail;
BLangRecordLiteral recordLiteral = ASTBuilderUtil.createEmptyRecordLiteral(errorConstructorExpr.pos,
((BErrorType) errorConstructorExpr.type).detailType);
if (errorConstructorExpr.namedArgs.isEmpty()) {
errorDetail = visitCloneReadonly(rewriteExpr(recordLiteral), recordLiteral.type);
} else {
for (BLangNamedArgsExpression namedArg : errorConstructorExpr.namedArgs) {
BLangRecordLiteral.BLangRecordKeyValueField member = new BLangRecordLiteral.BLangRecordKeyValueField();
member.key = new BLangRecordLiteral.BLangRecordKey(ASTBuilderUtil.createLiteral(namedArg.name.pos,
symTable.stringType, namedArg.name.value));
if (recordLiteral.type.tag == TypeTags.RECORD) {
member.valueExpr = addConversionExprIfRequired(namedArg.expr, symTable.anyType);
} else {
member.valueExpr = addConversionExprIfRequired(namedArg.expr, namedArg.expr.type);
}
recordLiteral.fields.add(member);
}
errorDetail = visitCloneReadonly(rewriteExpr(recordLiteral),
((BErrorType) errorConstructorExpr.type).detailType);
}
errorConstructorExpr.errorDetail = errorDetail;
result = errorConstructorExpr;
}
@Override
public void visit(BLangInvocation.BLangActionInvocation actionInvocation) {
if (!actionInvocation.async && actionInvocation.invokedInsideTransaction) {
transactionDesugar.startTransactionCoordinatorOnce(env, actionInvocation.pos);
}
rewriteInvocation(actionInvocation, actionInvocation.async);
}
private void rewriteInvocation(BLangInvocation invocation, boolean async) {
BLangInvocation invRef = invocation;
if (!enclLocks.isEmpty()) {
BLangLockStmt lock = enclLocks.peek();
lock.lockVariables.addAll(((BInvokableSymbol) invocation.symbol).dependentGlobalVars);
}
reorderArguments(invocation);
invocation.requiredArgs = rewriteExprs(invocation.requiredArgs);
fixNonRestArgTypeCastInTypeParamInvocation(invocation);
invocation.restArgs = rewriteExprs(invocation.restArgs);
annotationDesugar.defineStatementAnnotations(invocation.annAttachments, invocation.pos,
invocation.symbol.pkgID, invocation.symbol.owner, env);
if (invocation.functionPointerInvocation) {
visitFunctionPointerInvocation(invocation);
return;
}
invocation.expr = rewriteExpr(invocation.expr);
result = invRef;
BInvokableSymbol invSym = (BInvokableSymbol) invocation.symbol;
if (Symbols.isFlagOn(invSym.retType.flags, Flags.PARAMETERIZED)) {
BType retType = typeBuilder.build(invSym.retType);
invocation.type = retType;
}
if (invocation.expr == null) {
fixTypeCastInTypeParamInvocation(invocation, invRef);
if (invocation.exprSymbol == null) {
return;
}
invocation.expr = ASTBuilderUtil.createVariableRef(invocation.pos, invocation.exprSymbol);
invocation.expr = rewriteExpr(invocation.expr);
}
switch (invocation.expr.type.tag) {
case TypeTags.OBJECT:
case TypeTags.RECORD:
if (!invocation.langLibInvocation) {
List<BLangExpression> argExprs = new ArrayList<>(invocation.requiredArgs);
argExprs.add(0, invocation.expr);
BLangAttachedFunctionInvocation attachedFunctionInvocation =
new BLangAttachedFunctionInvocation(invocation.pos, argExprs, invocation.restArgs,
invocation.symbol, invocation.type, invocation.expr,
async);
attachedFunctionInvocation.name = invocation.name;
attachedFunctionInvocation.annAttachments = invocation.annAttachments;
result = invRef = attachedFunctionInvocation;
}
break;
}
fixTypeCastInTypeParamInvocation(invocation, invRef);
}
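// For langlib invocations, adds casts on the non-receiver required arguments (index 0 is the
// receiver) so that each argument conforms to the declared parameter type of the langlib function.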
private void fixNonRestArgTypeCastInTypeParamInvocation(BLangInvocation iExpr) {
if (!iExpr.langLibInvocation) {
return;
}
List<BLangExpression> requiredArgs = iExpr.requiredArgs;
List<BVarSymbol> params = ((BInvokableSymbol) iExpr.symbol).params;
for (int i = 1; i < requiredArgs.size(); i++) {
requiredArgs.set(i, addConversionExprIfRequired(requiredArgs.get(i), params.get(i).type));
}
}
/* This function is a workaround and needs improvement.
 * Notes for improvement:
 * 1. Both arguments refer to the same invocation.
 * 2. Due to the current type-param logic, the type-param flag ends up on the original type.
 * 3. When an error type carries the Cloneable type with the type-param flag set, this code changes
 *    the expression type.
 * 4. Using the error type is problematic because Cloneable is itself a type param
 *    (e.g. ExprBodiedFunctionTest); 'never' was added to the Cloneable type param:
 * @typeParam type
 * CloneableType Cloneable|never;
 */
private void fixTypeCastInTypeParamInvocation(BLangInvocation iExpr, BLangInvocation genIExpr) {
var returnTypeOfInvokable = ((BInvokableSymbol) iExpr.symbol).retType;
if (iExpr.langLibInvocation || TypeParamAnalyzer.containsTypeParam(returnTypeOfInvokable)) {
BType originalInvType = genIExpr.type;
if (!genIExpr.async) {
genIExpr.type = returnTypeOfInvokable;
}
BLangExpression expr = addConversionExprIfRequired(genIExpr, originalInvType);
if (expr.getKind() == NodeKind.TYPE_CONVERSION_EXPR) {
this.result = expr;
return;
}
BLangTypeConversionExpr conversionExpr = (BLangTypeConversionExpr) TreeBuilder.createTypeConversionNode();
conversionExpr.expr = genIExpr;
conversionExpr.targetType = originalInvType;
conversionExpr.type = originalInvType;
conversionExpr.pos = genIExpr.pos;
this.result = conversionExpr;
}
}
private BLangLiteral createNilLiteral() {
BLangLiteral literal = (BLangLiteral) TreeBuilder.createLiteralExpression();
literal.value = null;
literal.type = symTable.nilType;
return literal;
}
@Override
public void visit(BLangTypeInit typeInitExpr) {
if (typeInitExpr.type.tag == TypeTags.STREAM) {
result = rewriteExpr(desugarStreamTypeInit(typeInitExpr));
} else {
result = rewrite(desugarObjectTypeInit(typeInitExpr), env);
}
}
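/**
 * Desugars an object constructor expression 'new T(args)' into a block statement expression.
 * When init() may return an error, the generated code is roughly (illustrative sketch):
 *
 * T $obj$ = new;                    // allocation only; init() is invoked separately below
 * T|error $temp$ = $obj$.init(args);
 * T|error $result$;
 * if ($temp$ is error) {
 *     $result$ = $temp$;
 * } else {
 *     $result$ = $obj$;
 * }
 * // the statement expression evaluates to $result$
 */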
private BLangStatementExpression desugarObjectTypeInit(BLangTypeInit typeInitExpr) {
typeInitExpr.desugared = true;
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(typeInitExpr.pos);
BType objType = getObjectType(typeInitExpr.type);
BLangSimpleVariableDef objVarDef = createVarDef("$obj$", objType, typeInitExpr, typeInitExpr.pos);
objVarDef.var.name.pos = symTable.builtinPos;
BLangSimpleVarRef objVarRef = ASTBuilderUtil.createVariableRef(typeInitExpr.pos, objVarDef.var.symbol);
blockStmt.addStatement(objVarDef);
typeInitExpr.initInvocation.exprSymbol = objVarDef.var.symbol;
typeInitExpr.initInvocation.symbol = ((BObjectTypeSymbol) objType.tsymbol).generatedInitializerFunc.symbol;
if (typeInitExpr.initInvocation.type.tag == TypeTags.NIL) {
BLangExpressionStmt initInvExpr = ASTBuilderUtil.createExpressionStmt(typeInitExpr.pos, blockStmt);
initInvExpr.expr = typeInitExpr.initInvocation;
typeInitExpr.initInvocation.name.value = Names.GENERATED_INIT_SUFFIX.value;
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt, objVarRef);
stmtExpr.type = objVarRef.symbol.type;
return stmtExpr;
}
BLangSimpleVariableDef initInvRetValVarDef = createVarDef("$temp$", typeInitExpr.initInvocation.type,
typeInitExpr.initInvocation, typeInitExpr.pos);
blockStmt.addStatement(initInvRetValVarDef);
BLangSimpleVariableDef resultVarDef = createVarDef("$result$", typeInitExpr.type, null, typeInitExpr.pos);
blockStmt.addStatement(resultVarDef);
BLangSimpleVarRef initRetValVarRefInCondition =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, initInvRetValVarDef.var.symbol);
BLangBlockStmt thenStmt = ASTBuilderUtil.createBlockStmt(symTable.builtinPos);
BLangTypeTestExpr isErrorTest =
ASTBuilderUtil.createTypeTestExpr(symTable.builtinPos, initRetValVarRefInCondition, getErrorTypeNode());
isErrorTest.type = symTable.booleanType;
BLangSimpleVarRef thenInitRetValVarRef =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, initInvRetValVarDef.var.symbol);
BLangSimpleVarRef thenResultVarRef =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, resultVarDef.var.symbol);
BLangAssignment errAssignment =
ASTBuilderUtil.createAssignmentStmt(symTable.builtinPos, thenResultVarRef, thenInitRetValVarRef);
thenStmt.addStatement(errAssignment);
BLangSimpleVarRef elseResultVarRef =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, resultVarDef.var.symbol);
BLangAssignment objAssignment =
ASTBuilderUtil.createAssignmentStmt(symTable.builtinPos, elseResultVarRef, objVarRef);
BLangBlockStmt elseStmt = ASTBuilderUtil.createBlockStmt(symTable.builtinPos);
elseStmt.addStatement(objAssignment);
BLangIf ifelse = ASTBuilderUtil.createIfElseStmt(symTable.builtinPos, isErrorTest, thenStmt, elseStmt);
blockStmt.addStatement(ifelse);
BLangSimpleVarRef resultVarRef =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, resultVarDef.var.symbol);
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt, resultVarRef);
stmtExpr.type = resultVarRef.symbol.type;
return stmtExpr;
}
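// Desugars 'new stream<T, E>(...)' into a call to the lang.internal stream construction function,
// passing a typedesc for the constraint type plus the optional iterator argument, if any.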
private BLangInvocation desugarStreamTypeInit(BLangTypeInit typeInitExpr) {
BInvokableSymbol symbol = (BInvokableSymbol) symTable.langInternalModuleSymbol.scope
.lookup(Names.CONSTRUCT_STREAM).symbol;
BType targetType = ((BStreamType) typeInitExpr.type).constraint;
BType errorType = ((BStreamType) typeInitExpr.type).error;
BType typedescType = new BTypedescType(targetType, symTable.typeDesc.tsymbol);
BLangTypedescExpr typedescExpr = new BLangTypedescExpr();
typedescExpr.resolvedType = targetType;
typedescExpr.type = typedescType;
List<BLangExpression> args = new ArrayList<>(Lists.of(typedescExpr));
if (!typeInitExpr.argsExpr.isEmpty()) {
args.add(typeInitExpr.argsExpr.get(0));
}
BLangInvocation streamConstructInvocation = ASTBuilderUtil.createInvocationExprForMethod(
typeInitExpr.pos, symbol, args, symResolver);
streamConstructInvocation.type = new BStreamType(TypeTags.STREAM, targetType, errorType, null);
return streamConstructInvocation;
}
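// Creates a variable definition for the given name, type and initializer, reusing a symbol already
// defined in the current scope when present and creating a fresh one otherwise.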
private BLangSimpleVariableDef createVarDef(String name, BType type, BLangExpression expr,
Location location) {
BSymbol objSym = symResolver.lookupSymbolInMainSpace(env, names.fromString(name));
if (objSym == null || objSym == symTable.notFoundSymbol) {
objSym = new BVarSymbol(0, names.fromString(name), this.env.scope.owner.pkgID, type,
this.env.scope.owner, location, VIRTUAL);
}
BLangSimpleVariable objVar = ASTBuilderUtil.createVariable(location, name, type, expr, (BVarSymbol) objSym);
BLangSimpleVariableDef objVarDef = ASTBuilderUtil.createVariableDef(location);
objVarDef.var = objVar;
objVarDef.type = objVar.type;
return objVarDef;
}
private BType getObjectType(BType type) {
if (type.tag == TypeTags.OBJECT) {
return type;
} else if (type.tag == TypeTags.UNION) {
return ((BUnionType) type).getMemberTypes().stream()
.filter(t -> t.tag == TypeTags.OBJECT)
.findFirst()
.orElse(symTable.noType);
}
throw new IllegalStateException("Non-object type '" + type.toString() + "' found in object init context");
}
BLangErrorType getErrorTypeNode() {
BLangErrorType errorTypeNode = (BLangErrorType) TreeBuilder.createErrorTypeNode();
errorTypeNode.type = symTable.errorType;
errorTypeNode.pos = symTable.builtinPos;
return errorTypeNode;
}
BLangErrorType getErrorOrNillTypeNode() {
BLangErrorType errorTypeNode = (BLangErrorType) TreeBuilder.createErrorTypeNode();
errorTypeNode.type = symTable.errorOrNilType;
return errorTypeNode;
}
@Override
public void visit(BLangTernaryExpr ternaryExpr) {
/*
 * First desugar to if-else:
 *
 * T $result$;
 * if (expr) {
 *     $result$ = thenExpr;
 * } else {
 *     $result$ = elseExpr;
 * }
 *
 */
BLangSimpleVariableDef resultVarDef = createVarDef("$ternary_result$", ternaryExpr.type, null, ternaryExpr.pos);
BLangBlockStmt thenBody = ASTBuilderUtil.createBlockStmt(ternaryExpr.pos);
BLangBlockStmt elseBody = ASTBuilderUtil.createBlockStmt(ternaryExpr.pos);
BLangSimpleVarRef thenResultVarRef = ASTBuilderUtil.createVariableRef(ternaryExpr.pos, resultVarDef.var.symbol);
BLangAssignment thenAssignment =
ASTBuilderUtil.createAssignmentStmt(ternaryExpr.pos, thenResultVarRef, ternaryExpr.thenExpr);
thenBody.addStatement(thenAssignment);
BLangSimpleVarRef elseResultVarRef = ASTBuilderUtil.createVariableRef(ternaryExpr.pos, resultVarDef.var.symbol);
BLangAssignment elseAssignment =
ASTBuilderUtil.createAssignmentStmt(ternaryExpr.pos, elseResultVarRef, ternaryExpr.elseExpr);
elseBody.addStatement(elseAssignment);
BLangSimpleVarRef resultVarRef = ASTBuilderUtil.createVariableRef(ternaryExpr.pos, resultVarDef.var.symbol);
BLangIf ifElse = ASTBuilderUtil.createIfElseStmt(ternaryExpr.pos, ternaryExpr.expr, thenBody, elseBody);
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(ternaryExpr.pos, Lists.of(resultVarDef, ifElse));
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt, resultVarRef);
stmtExpr.type = ternaryExpr.type;
result = rewriteExpr(stmtExpr);
}
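// A multi-alternative wait expression such as 'wait f1 | f2 | f3' arrives here as nested binary
// expressions; collectAllBinaryExprs flattens them into a single list of future expressions.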
@Override
public void visit(BLangWaitExpr waitExpr) {
if (waitExpr.getExpression().getKind() == NodeKind.BINARY_EXPR) {
waitExpr.exprList = collectAllBinaryExprs((BLangBinaryExpr) waitExpr.getExpression(), new ArrayList<>());
} else {
waitExpr.exprList = Collections.singletonList(rewriteExpr(waitExpr.getExpression()));
}
result = waitExpr;
}
private List<BLangExpression> collectAllBinaryExprs(BLangBinaryExpr binaryExpr, List<BLangExpression> exprs) {
visitBinaryExprOfWait(binaryExpr.lhsExpr, exprs);
visitBinaryExprOfWait(binaryExpr.rhsExpr, exprs);
return exprs;
}
private void visitBinaryExprOfWait(BLangExpression expr, List<BLangExpression> exprs) {
if (expr.getKind() == NodeKind.BINARY_EXPR) {
collectAllBinaryExprs((BLangBinaryExpr) expr, exprs);
} else {
expr = rewriteExpr(expr);
exprs.add(expr);
}
}
@Override
public void visit(BLangWaitForAllExpr waitExpr) {
waitExpr.keyValuePairs.forEach(keyValue -> {
if (keyValue.valueExpr != null) {
keyValue.valueExpr = rewriteExpr(keyValue.valueExpr);
} else {
keyValue.keyExpr = rewriteExpr(keyValue.keyExpr);
}
});
BLangExpression expr = new BLangWaitForAllExpr.BLangWaitLiteral(waitExpr.keyValuePairs, waitExpr.type);
result = rewriteExpr(expr);
}
@Override
public void visit(BLangTrapExpr trapExpr) {
trapExpr.expr = rewriteExpr(trapExpr.expr);
if (trapExpr.expr.type.tag != TypeTags.NIL) {
trapExpr.expr = addConversionExprIfRequired(trapExpr.expr, trapExpr.type);
}
result = trapExpr;
}
@Override
public void visit(BLangBinaryExpr binaryExpr) {
if (binaryExpr.opKind == OperatorKind.HALF_OPEN_RANGE || binaryExpr.opKind == OperatorKind.CLOSED_RANGE) {
if (binaryExpr.opKind == OperatorKind.HALF_OPEN_RANGE) {
binaryExpr.rhsExpr = getModifiedIntRangeEndExpr(binaryExpr.rhsExpr);
}
result = rewriteExpr(replaceWithIntRange(binaryExpr.pos, binaryExpr.lhsExpr, binaryExpr.rhsExpr));
return;
}
if (binaryExpr.opKind == OperatorKind.AND || binaryExpr.opKind == OperatorKind.OR) {
visitBinaryLogicalExpr(binaryExpr);
return;
}
OperatorKind binaryOpKind = binaryExpr.opKind;
if (binaryOpKind == OperatorKind.ADD || binaryOpKind == OperatorKind.SUB ||
binaryOpKind == OperatorKind.MUL || binaryOpKind == OperatorKind.DIV ||
binaryOpKind == OperatorKind.MOD || binaryOpKind == OperatorKind.BITWISE_AND ||
binaryOpKind == OperatorKind.BITWISE_OR || binaryOpKind == OperatorKind.BITWISE_XOR) {
checkByteTypeIncompatibleOperations(binaryExpr);
}
binaryExpr.lhsExpr = rewriteExpr(binaryExpr.lhsExpr);
binaryExpr.rhsExpr = rewriteExpr(binaryExpr.rhsExpr);
result = binaryExpr;
int rhsExprTypeTag = binaryExpr.rhsExpr.type.tag;
int lhsExprTypeTag = binaryExpr.lhsExpr.type.tag;
if (rhsExprTypeTag != lhsExprTypeTag && (binaryExpr.opKind == OperatorKind.EQUAL ||
binaryExpr.opKind == OperatorKind.NOT_EQUAL ||
binaryExpr.opKind == OperatorKind.REF_EQUAL ||
binaryExpr.opKind == OperatorKind.REF_NOT_EQUAL)) {
if (lhsExprTypeTag == TypeTags.INT && rhsExprTypeTag == TypeTags.BYTE) {
binaryExpr.rhsExpr = createTypeCastExpr(binaryExpr.rhsExpr, symTable.intType);
return;
}
if (lhsExprTypeTag == TypeTags.BYTE && rhsExprTypeTag == TypeTags.INT) {
binaryExpr.lhsExpr = createTypeCastExpr(binaryExpr.lhsExpr, symTable.intType);
return;
}
}
if (lhsExprTypeTag == rhsExprTypeTag) {
return;
}
if (TypeTags.isStringTypeTag(lhsExprTypeTag) && binaryExpr.opKind == OperatorKind.ADD) {
if (TypeTags.isXMLTypeTag(rhsExprTypeTag)) {
binaryExpr.lhsExpr = ASTBuilderUtil.createXMLTextLiteralNode(binaryExpr, binaryExpr.lhsExpr,
binaryExpr.lhsExpr.pos, symTable.xmlType);
return;
}
binaryExpr.rhsExpr = createTypeCastExpr(binaryExpr.rhsExpr, binaryExpr.lhsExpr.type);
return;
}
if (TypeTags.isStringTypeTag(rhsExprTypeTag) && binaryExpr.opKind == OperatorKind.ADD) {
if (TypeTags.isXMLTypeTag(lhsExprTypeTag)) {
binaryExpr.rhsExpr = ASTBuilderUtil.createXMLTextLiteralNode(binaryExpr, binaryExpr.rhsExpr,
binaryExpr.rhsExpr.pos, symTable.xmlType);
return;
}
binaryExpr.lhsExpr = createTypeCastExpr(binaryExpr.lhsExpr, binaryExpr.rhsExpr.type);
return;
}
if (lhsExprTypeTag == TypeTags.DECIMAL) {
binaryExpr.rhsExpr = createTypeCastExpr(binaryExpr.rhsExpr, binaryExpr.lhsExpr.type);
return;
}
if (rhsExprTypeTag == TypeTags.DECIMAL) {
binaryExpr.lhsExpr = createTypeCastExpr(binaryExpr.lhsExpr, binaryExpr.rhsExpr.type);
return;
}
if (lhsExprTypeTag == TypeTags.FLOAT) {
binaryExpr.rhsExpr = createTypeCastExpr(binaryExpr.rhsExpr, binaryExpr.lhsExpr.type);
return;
}
if (rhsExprTypeTag == TypeTags.FLOAT) {
binaryExpr.lhsExpr = createTypeCastExpr(binaryExpr.lhsExpr, binaryExpr.rhsExpr.type);
}
}
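// Builds a call to the lang.internal int-range construction function with the given bounds; range
// expressions 'a ... b' and 'a ..< b' are desugared through this helper.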
private BLangInvocation replaceWithIntRange(Location location, BLangExpression lhsExpr,
BLangExpression rhsExpr) {
BInvokableSymbol symbol = (BInvokableSymbol) symTable.langInternalModuleSymbol.scope
.lookup(Names.CREATE_INT_RANGE).symbol;
BLangInvocation createIntRangeInvocation = ASTBuilderUtil.createInvocationExprForMethod(location, symbol,
new ArrayList<>(Lists.of(lhsExpr, rhsExpr)), symResolver);
createIntRangeInvocation.type = symTable.intRangeType;
return createIntRangeInvocation;
}
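// For arithmetic and bitwise binary operations whose expected type is 'int', widens any 'byte'
// operand to 'int' so that both sides agree with the result type.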
private void checkByteTypeIncompatibleOperations(BLangBinaryExpr binaryExpr) {
if (binaryExpr.expectedType == null) {
return;
}
int rhsExprTypeTag = binaryExpr.rhsExpr.type.tag;
int lhsExprTypeTag = binaryExpr.lhsExpr.type.tag;
if (rhsExprTypeTag != TypeTags.BYTE && lhsExprTypeTag != TypeTags.BYTE) {
return;
}
int resultTypeTag = binaryExpr.expectedType.tag;
if (resultTypeTag == TypeTags.INT) {
if (rhsExprTypeTag == TypeTags.BYTE) {
binaryExpr.rhsExpr = addConversionExprIfRequired(binaryExpr.rhsExpr, symTable.intType);
}
if (lhsExprTypeTag == TypeTags.BYTE) {
binaryExpr.lhsExpr = addConversionExprIfRequired(binaryExpr.lhsExpr, symTable.intType);
}
}
}
/**
 * This method checks whether the given binary expression is a shift operation.
 * If it is, both the lhs and the rhs of the binary expression are converted to the 'int' type.
 * <p>
 * byte a = 12;
 * byte b = 34;
 * int i = 234;
 * int j = -4;
 * <p>
 * true: where the binary expression's expected type is 'int'
 * int i1 = a >> b;
 * int i2 = a << b;
 * int i3 = a >> i;
 * int i4 = a << i;
 * int i5 = i >> j;
 * int i6 = i << j;
 */
private boolean isBitwiseShiftOperation(BLangBinaryExpr binaryExpr) {
return binaryExpr.opKind == OperatorKind.BITWISE_LEFT_SHIFT ||
binaryExpr.opKind == OperatorKind.BITWISE_RIGHT_SHIFT ||
binaryExpr.opKind == OperatorKind.BITWISE_UNSIGNED_RIGHT_SHIFT;
}
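// Desugars the elvis expression 'lhs ?: rhs' into a match expression that yields rhs when lhs
// evaluates to nil, and lhs itself otherwise.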
@Override
public void visit(BLangElvisExpr elvisExpr) {
BLangMatchExpression matchExpr = ASTBuilderUtil.createMatchExpression(elvisExpr.lhsExpr);
matchExpr.patternClauses.add(getMatchNullPatternGivenExpression(elvisExpr.pos,
rewriteExpr(elvisExpr.rhsExpr)));
matchExpr.type = elvisExpr.type;
matchExpr.pos = elvisExpr.pos;
result = rewriteExpr(matchExpr);
}
@Override
public void visit(BLangUnaryExpr unaryExpr) {
if (OperatorKind.BITWISE_COMPLEMENT == unaryExpr.operator) {
rewriteBitwiseComplementOperator(unaryExpr);
return;
}
unaryExpr.expr = rewriteExpr(unaryExpr.expr);
result = unaryExpr;
}
/**
 * This method desugars a bitwise complement (~) unary expression into a bitwise XOR binary expression as below.
 * Example : ~a -> a ^ -1;
 * ~ 11110011 -> 00001100
 * 11110011 ^ 11111111 -> 00001100
 *
 * @param unaryExpr the bitwise complement expression
 */
private void rewriteBitwiseComplementOperator(BLangUnaryExpr unaryExpr) {
final Location pos = unaryExpr.pos;
final BLangBinaryExpr binaryExpr = (BLangBinaryExpr) TreeBuilder.createBinaryExpressionNode();
binaryExpr.pos = pos;
binaryExpr.opKind = OperatorKind.BITWISE_XOR;
binaryExpr.lhsExpr = unaryExpr.expr;
if (TypeTags.BYTE == unaryExpr.type.tag) {
binaryExpr.type = symTable.byteType;
binaryExpr.rhsExpr = ASTBuilderUtil.createLiteral(pos, symTable.byteType, 0xffL);
binaryExpr.opSymbol = (BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.BITWISE_XOR,
symTable.byteType, symTable.byteType);
} else {
binaryExpr.type = symTable.intType;
binaryExpr.rhsExpr = ASTBuilderUtil.createLiteral(pos, symTable.intType, -1L);
binaryExpr.opSymbol = (BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.BITWISE_XOR,
symTable.intType, symTable.intType);
}
result = rewriteExpr(binaryExpr);
}
@Override
public void visit(BLangTypeConversionExpr conversionExpr) {
if (conversionExpr.typeNode == null && !conversionExpr.annAttachments.isEmpty()) {
result = rewriteExpr(conversionExpr.expr);
return;
}
conversionExpr.typeNode = rewrite(conversionExpr.typeNode, env);
if (types.isXMLExprCastableToString(conversionExpr.expr.type, conversionExpr.type)) {
result = convertXMLTextToString(conversionExpr);
return;
}
conversionExpr.expr = rewriteExpr(conversionExpr.expr);
result = conversionExpr;
}
private BLangExpression convertXMLTextToString(BLangTypeConversionExpr conversionExpr) {
BLangInvocation invocationNode = createLanglibXMLInvocation(conversionExpr.pos, XML_GET_CONTENT_OF_TEXT,
conversionExpr.expr, new ArrayList<>(), new ArrayList<>());
BLangSimpleVariableDef tempVarDef = createVarDef("$$__xml_string__$$",
conversionExpr.targetType, invocationNode, conversionExpr.pos);
BLangSimpleVarRef tempVarRef = ASTBuilderUtil.createVariableRef(conversionExpr.pos, tempVarDef.var.symbol);
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(conversionExpr.pos);
blockStmt.addStatement(tempVarDef);
BLangStatementExpression stmtExpr = ASTBuilderUtil.createStatementExpression(blockStmt, tempVarRef);
stmtExpr.type = conversionExpr.type;
return rewrite(stmtExpr, env);
}
@Override
public void visit(BLangLambdaFunction bLangLambdaFunction) {
env.enclPkg.lambdaFunctions.add(bLangLambdaFunction);
result = bLangLambdaFunction;
}
@Override
public void visit(BLangArrowFunction bLangArrowFunction) {
BLangFunction bLangFunction = (BLangFunction) TreeBuilder.createFunctionNode();
bLangFunction.setName(bLangArrowFunction.functionName);
BLangLambdaFunction lambdaFunction = (BLangLambdaFunction) TreeBuilder.createLambdaFunctionNode();
lambdaFunction.pos = bLangArrowFunction.pos;
bLangFunction.addFlag(Flag.LAMBDA);
lambdaFunction.function = bLangFunction;
BLangValueType returnType = (BLangValueType) TreeBuilder.createValueTypeNode();
returnType.type = bLangArrowFunction.body.expr.type;
bLangFunction.setReturnTypeNode(returnType);
bLangFunction.setBody(populateArrowExprBodyBlock(bLangArrowFunction));
bLangArrowFunction.params.forEach(bLangFunction::addParameter);
lambdaFunction.parent = bLangArrowFunction.parent;
lambdaFunction.type = bLangArrowFunction.funcType;
BLangFunction funcNode = lambdaFunction.function;
BInvokableSymbol funcSymbol = Symbols.createFunctionSymbol(Flags.asMask(funcNode.flagSet),
new Name(funcNode.name.value),
env.enclPkg.symbol.pkgID,
bLangArrowFunction.funcType,
env.enclEnv.enclVarSym, true,
bLangArrowFunction.pos, VIRTUAL);
SymbolEnv invokableEnv = SymbolEnv.createFunctionEnv(funcNode, funcSymbol.scope, env);
defineInvokableSymbol(funcNode, funcSymbol, invokableEnv);
List<BVarSymbol> paramSymbols = funcNode.requiredParams.stream().peek(varNode -> {
Scope enclScope = invokableEnv.scope;
varNode.symbol.kind = SymbolKind.FUNCTION;
varNode.symbol.owner = invokableEnv.scope.owner;
enclScope.define(varNode.symbol.name, varNode.symbol);
}).map(varNode -> varNode.symbol).collect(Collectors.toList());
funcSymbol.params = paramSymbols;
funcSymbol.restParam = getRestSymbol(funcNode);
funcSymbol.retType = funcNode.returnTypeNode.type;
List<BType> paramTypes = paramSymbols.stream().map(paramSym -> paramSym.type).collect(Collectors.toList());
funcNode.type = new BInvokableType(paramTypes, getRestType(funcSymbol), funcNode.returnTypeNode.type, null);
lambdaFunction.function.pos = bLangArrowFunction.pos;
lambdaFunction.function.body.pos = bLangArrowFunction.pos;
lambdaFunction.capturedClosureEnv = env;
rewrite(lambdaFunction.function, env);
env.enclPkg.addFunction(lambdaFunction.function);
bLangArrowFunction.function = lambdaFunction.function;
result = rewriteExpr(lambdaFunction);
}
private void defineInvokableSymbol(BLangInvokableNode invokableNode, BInvokableSymbol funcSymbol,
SymbolEnv invokableEnv) {
invokableNode.symbol = funcSymbol;
funcSymbol.scope = new Scope(funcSymbol);
invokableEnv.scope = funcSymbol.scope;
}
@Override
public void visit(BLangXMLQName xmlQName) {
result = xmlQName;
}
@Override
public void visit(BLangXMLAttribute xmlAttribute) {
xmlAttribute.name = rewriteExpr(xmlAttribute.name);
xmlAttribute.value = rewriteExpr(xmlAttribute.value);
result = xmlAttribute;
}
@Override
public void visit(BLangXMLElementLiteral xmlElementLiteral) {
xmlElementLiteral.startTagName = rewriteExpr(xmlElementLiteral.startTagName);
xmlElementLiteral.endTagName = rewriteExpr(xmlElementLiteral.endTagName);
xmlElementLiteral.modifiedChildren = rewriteExprs(xmlElementLiteral.modifiedChildren);
xmlElementLiteral.attributes = rewriteExprs(xmlElementLiteral.attributes);
Iterator<BLangXMLAttribute> attributesItr = xmlElementLiteral.attributes.iterator();
while (attributesItr.hasNext()) {
BLangXMLAttribute attribute = attributesItr.next();
if (!attribute.isNamespaceDeclr) {
continue;
}
BLangXMLNS xmlns;
if ((xmlElementLiteral.scope.owner.tag & SymTag.PACKAGE) == SymTag.PACKAGE) {
xmlns = new BLangPackageXMLNS();
} else {
xmlns = new BLangLocalXMLNS();
}
xmlns.namespaceURI = attribute.value.concatExpr;
xmlns.prefix = ((BLangXMLQName) attribute.name).localname;
xmlns.symbol = attribute.symbol;
xmlElementLiteral.inlineNamespaces.add(xmlns);
}
result = xmlElementLiteral;
}
@Override
public void visit(BLangXMLTextLiteral xmlTextLiteral) {
xmlTextLiteral.concatExpr = rewriteExpr(constructStringTemplateConcatExpression(xmlTextLiteral.textFragments));
result = xmlTextLiteral;
}
@Override
public void visit(BLangXMLCommentLiteral xmlCommentLiteral) {
xmlCommentLiteral.concatExpr = rewriteExpr(
constructStringTemplateConcatExpression(xmlCommentLiteral.textFragments));
result = xmlCommentLiteral;
}
@Override
public void visit(BLangXMLProcInsLiteral xmlProcInsLiteral) {
xmlProcInsLiteral.target = rewriteExpr(xmlProcInsLiteral.target);
xmlProcInsLiteral.dataConcatExpr =
rewriteExpr(constructStringTemplateConcatExpression(xmlProcInsLiteral.dataFragments));
result = xmlProcInsLiteral;
}
@Override
public void visit(BLangXMLQuotedString xmlQuotedString) {
xmlQuotedString.concatExpr = rewriteExpr(
constructStringTemplateConcatExpression(xmlQuotedString.textFragments));
result = xmlQuotedString;
}
@Override
public void visit(BLangStringTemplateLiteral stringTemplateLiteral) {
result = rewriteExpr(constructStringTemplateConcatExpression(stringTemplateLiteral.exprs));
}
/**
* The raw template literal gets desugared to a type init expression. For each literal, a new object class type
* def is generated from the object type. The type init expression creates an instance of this generated object
* type. For example, consider the following statements:
* string name = "Pubudu";
* 'object:RawTemplate rt = `Hello ${name}!`;
*
* The raw template literal above is desugared to:
* type RawTemplate$Impl$0 object {
* public string[] strings = ["Hello ", "!"];
* public (any|error)[] insertions;
*
* function init((any|error)[] insertions) {
* self.insertions = insertions;
* }
* };
*
*
* 'object:RawTemplate rt = new RawTemplate$Impl$0([name]);
*
* @param rawTemplateLiteral The raw template literal to be desugared.
*/
@Override
public void visit(BLangRawTemplateLiteral rawTemplateLiteral) {
Location pos = rawTemplateLiteral.pos;
BObjectType objType = (BObjectType) rawTemplateLiteral.type;
BLangClassDefinition objClassDef =
desugarTemplateLiteralObjectTypedef(rawTemplateLiteral.strings, objType, pos);
BObjectType classObjType = (BObjectType) objClassDef.type;
BVarSymbol insertionsSym = classObjType.fields.get("insertions").symbol;
BLangListConstructorExpr insertionsList = ASTBuilderUtil.createListConstructorExpr(pos, insertionsSym.type);
insertionsList.exprs.addAll(rawTemplateLiteral.insertions);
insertionsList.expectedType = insertionsSym.type;
BLangTypeInit typeNewExpr = ASTBuilderUtil.createEmptyTypeInit(pos, classObjType);
typeNewExpr.argsExpr.add(insertionsList);
typeNewExpr.initInvocation.argExprs.add(insertionsList);
typeNewExpr.initInvocation.requiredArgs.add(insertionsList);
result = rewriteExpr(typeNewExpr);
}
/**
 * This method generates a concrete object class definition for the provided raw template object type as follows.
* A literal defined as 'object:RawTemplate rt = `Hello ${name}!`;
* is desugared to,
* type $anonType$0 object {
* public string[] strings = ["Hello ", "!"];
* public (any|error)[] insertions;
*
* function init((any|error)[] insertions) {
* self.insertions = insertions;
* }
* };
* @param strings The string portions of the literal
* @param objectType The abstract object type for which an object class needs to be generated
* @param pos The diagnostic position info for the type node
* @return Returns the generated concrete object class def
*/
private BLangClassDefinition desugarTemplateLiteralObjectTypedef(List<BLangLiteral> strings, BObjectType objectType,
Location pos) {
BObjectTypeSymbol tSymbol = (BObjectTypeSymbol) objectType.tsymbol;
Name objectClassName = names.fromString(
anonModelHelper.getNextRawTemplateTypeKey(env.enclPkg.packageID, tSymbol.name));
BObjectTypeSymbol classTSymbol = Symbols.createClassSymbol(tSymbol.flags, objectClassName,
env.enclPkg.packageID, null, env.enclPkg.symbol,
pos, VIRTUAL, false);
classTSymbol.flags |= Flags.CLASS;
BObjectType objectClassType = new BObjectType(classTSymbol, classTSymbol.flags);
objectClassType.fields = objectType.fields;
classTSymbol.type = objectClassType;
BLangClassDefinition classDef = TypeDefBuilderHelper.createClassDef(pos, classTSymbol, env);
classDef.name = ASTBuilderUtil.createIdentifier(pos, objectClassType.tsymbol.name.value);
BType stringsType = objectClassType.fields.get("strings").symbol.type;
BLangListConstructorExpr stringsList = ASTBuilderUtil.createListConstructorExpr(pos, stringsType);
stringsList.exprs.addAll(strings);
stringsList.expectedType = stringsType;
classDef.fields.get(0).expr = stringsList;
BLangFunction userDefinedInitFunction = createUserDefinedObjectInitFn(classDef, env);
classDef.initFunction = userDefinedInitFunction;
env.enclPkg.functions.add(userDefinedInitFunction);
env.enclPkg.topLevelNodes.add(userDefinedInitFunction);
BLangFunction tempGeneratedInitFunction = createGeneratedInitializerFunction(classDef, env);
tempGeneratedInitFunction.clonedEnv = SymbolEnv.createFunctionEnv(tempGeneratedInitFunction,
tempGeneratedInitFunction.symbol.scope, env);
this.semanticAnalyzer.analyzeNode(tempGeneratedInitFunction, env);
classDef.generatedInitFunction = tempGeneratedInitFunction;
env.enclPkg.functions.add(classDef.generatedInitFunction);
env.enclPkg.topLevelNodes.add(classDef.generatedInitFunction);
return rewrite(classDef, env);
}
/**
* Creates a user-defined init() method for the provided class definition node. For each field without a
* default value in the class definition, this adds a corresponding parameter to the init() method and
* assigns the param value to the respective field in the method body.
*
* @param classDefn The class definition node for which the init() method is generated
* @param env The symbol env for the class definition node
* @return The generated init() method
*/
private BLangFunction createUserDefinedObjectInitFn(BLangClassDefinition classDefn, SymbolEnv env) {
BLangFunction initFunction =
TypeDefBuilderHelper.createInitFunctionForStructureType(classDefn.pos, classDefn.symbol, env,
names, Names.USER_DEFINED_INIT_SUFFIX,
symTable, classDefn.type);
BObjectTypeSymbol typeSymbol = ((BObjectTypeSymbol) classDefn.type.tsymbol);
typeSymbol.initializerFunc = new BAttachedFunction(Names.USER_DEFINED_INIT_SUFFIX, initFunction.symbol,
(BInvokableType) initFunction.type, classDefn.pos);
classDefn.initFunction = initFunction;
initFunction.returnTypeNode.type = symTable.nilType;
BLangBlockFunctionBody initFuncBody = (BLangBlockFunctionBody) initFunction.body;
BInvokableType initFnType = (BInvokableType) initFunction.type;
for (BLangSimpleVariable field : classDefn.fields) {
if (field.expr != null) {
continue;
}
BVarSymbol fieldSym = field.symbol;
BVarSymbol paramSym = new BVarSymbol(Flags.FINAL, fieldSym.name, this.env.scope.owner.pkgID, fieldSym.type,
initFunction.symbol, classDefn.pos, VIRTUAL);
BLangSimpleVariable param = ASTBuilderUtil.createVariable(classDefn.pos, fieldSym.name.value,
fieldSym.type, null, paramSym);
param.flagSet.add(Flag.FINAL);
initFunction.symbol.scope.define(paramSym.name, paramSym);
initFunction.symbol.params.add(paramSym);
initFnType.paramTypes.add(param.type);
initFunction.requiredParams.add(param);
BLangSimpleVarRef paramRef = ASTBuilderUtil.createVariableRef(initFunction.pos, paramSym);
BLangAssignment fieldInit = createStructFieldUpdate(initFunction, paramRef, fieldSym, field.type,
initFunction.receiver.symbol, field.name);
initFuncBody.addStatement(fieldInit);
}
return initFunction;
}
@Override
public void visit(BLangWorkerSend workerSendNode) {
workerSendNode.expr = visitCloneInvocation(rewriteExpr(workerSendNode.expr), workerSendNode.expr.type);
result = workerSendNode;
}
@Override
public void visit(BLangWorkerSyncSendExpr syncSendExpr) {
syncSendExpr.expr = visitCloneInvocation(rewriteExpr(syncSendExpr.expr), syncSendExpr.expr.type);
result = syncSendExpr;
}
@Override
public void visit(BLangWorkerReceive workerReceiveNode) {
result = workerReceiveNode;
}
@Override
public void visit(BLangWorkerFlushExpr workerFlushExpr) {
workerFlushExpr.workerIdentifierList = workerFlushExpr.cachedWorkerSendStmts
.stream().map(send -> send.workerIdentifier).distinct().collect(Collectors.toList());
result = workerFlushExpr;
}
@Override
public void visit(BLangTransactionalExpr transactionalExpr) {
BInvokableSymbol isTransactionalSymbol =
(BInvokableSymbol) transactionDesugar.getInternalTransactionModuleInvokableSymbol(IS_TRANSACTIONAL);
result = ASTBuilderUtil
.createInvocationExprMethod(transactionalExpr.pos, isTransactionalSymbol, Collections.emptyList(),
Collections.emptyList(), symResolver);
}
@Override
public void visit(BLangCommitExpr commitExpr) {
BLangStatementExpression stmtExpr = transactionDesugar.desugar(commitExpr, env);
result = rewriteExpr(stmtExpr);
}
@Override
public void visit(BLangXMLAttributeAccess xmlAttributeAccessExpr) {
xmlAttributeAccessExpr.indexExpr = rewriteExpr(xmlAttributeAccessExpr.indexExpr);
xmlAttributeAccessExpr.expr = rewriteExpr(xmlAttributeAccessExpr.expr);
if (xmlAttributeAccessExpr.indexExpr != null
&& xmlAttributeAccessExpr.indexExpr.getKind() == NodeKind.XML_QNAME) {
((BLangXMLQName) xmlAttributeAccessExpr.indexExpr).isUsedInXML = true;
}
xmlAttributeAccessExpr.desugared = true;
if (xmlAttributeAccessExpr.lhsVar || xmlAttributeAccessExpr.indexExpr != null) {
result = xmlAttributeAccessExpr;
} else {
result = rewriteExpr(xmlAttributeAccessExpr);
}
}
@Override
public void visit(BLangFail failNode) {
if (this.onFailClause != null) {
if (this.onFailClause.bodyContainsFail) {
result = rewriteNestedOnFail(this.onFailClause, failNode);
} else {
BLangStatementExpression expression = createOnFailInvocation(onFailCallFuncDef, onFailClause,
failNode);
failNode.exprStmt = createExpressionStatement(failNode.pos, expression,
onFailClause.statementBlockReturns, env);
result = failNode;
}
} else {
BLangReturn stmt = ASTBuilderUtil.createReturnStmt(failNode.pos, rewrite(failNode.expr, env));
stmt.desugared = true;
result = stmt;
}
}
@Override
public void visit(BLangLocalVarRef localVarRef) {
result = localVarRef;
}
@Override
public void visit(BLangFieldVarRef fieldVarRef) {
result = fieldVarRef;
}
@Override
public void visit(BLangPackageVarRef packageVarRef) {
result = packageVarRef;
}
@Override
public void visit(BLangFunctionVarRef functionVarRef) {
result = functionVarRef;
}
@Override
public void visit(BLangStructFieldAccessExpr fieldAccessExpr) {
result = fieldAccessExpr;
}
@Override
public void visit(BLangStructFunctionVarRef functionVarRef) {
result = functionVarRef;
}
@Override
public void visit(BLangMapAccessExpr mapKeyAccessExpr) {
result = mapKeyAccessExpr;
}
@Override
public void visit(BLangArrayAccessExpr arrayIndexAccessExpr) {
result = arrayIndexAccessExpr;
}
@Override
public void visit(BLangTupleAccessExpr arrayIndexAccessExpr) {
result = arrayIndexAccessExpr;
}
@Override
public void visit(BLangTableAccessExpr tableKeyAccessExpr) {
result = tableKeyAccessExpr;
}
@Override
public void visit(BLangMapLiteral mapLiteral) {
result = mapLiteral;
}
@Override
public void visit(BLangStructLiteral structLiteral) {
result = structLiteral;
}
@Override
public void visit(BLangWaitForAllExpr.BLangWaitLiteral waitLiteral) {
result = waitLiteral;
}
@Override
public void visit(BLangXMLElementAccess xmlElementAccess) {
xmlElementAccess.expr = rewriteExpr(xmlElementAccess.expr);
ArrayList<BLangExpression> filters = expandFilters(xmlElementAccess.filters);
BLangInvocation invocationNode = createLanglibXMLInvocation(xmlElementAccess.pos, XML_INTERNAL_GET_ELEMENTS,
xmlElementAccess.expr, new ArrayList<>(), filters);
result = rewriteExpr(invocationNode);
}
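/**
 * Expands XML element filters into fully-qualified element name literals. A filter with a prefix is
 * expanded using the namespace bound to that prefix; a prefix-less, non-wildcard filter falls back to
 * the default namespace when one is in scope.
 */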
private ArrayList<BLangExpression> expandFilters(List<BLangXMLElementFilter> filters) {
Map<Name, BXMLNSSymbol> nameBXMLNSSymbolMap = symResolver.resolveAllNamespaces(env);
BXMLNSSymbol defaultNSSymbol = nameBXMLNSSymbolMap.get(names.fromString(XMLConstants.DEFAULT_NS_PREFIX));
String defaultNS = defaultNSSymbol != null ? defaultNSSymbol.namespaceURI : null;
ArrayList<BLangExpression> args = new ArrayList<>();
for (BLangXMLElementFilter filter : filters) {
BSymbol nsSymbol = symResolver.lookupSymbolInPrefixSpace(env, names.fromString(filter.namespace));
if (nsSymbol == symTable.notFoundSymbol) {
if (defaultNS != null && !filter.name.equals("*")) {
String expandedName = createExpandedQName(defaultNS, filter.name);
args.add(createStringLiteral(filter.elemNamePos, expandedName));
} else {
args.add(createStringLiteral(filter.elemNamePos, filter.name));
}
} else {
BXMLNSSymbol bxmlnsSymbol = (BXMLNSSymbol) nsSymbol;
String expandedName = createExpandedQName(bxmlnsSymbol.namespaceURI, filter.name);
BLangLiteral stringLiteral = createStringLiteral(filter.elemNamePos, expandedName);
args.add(stringLiteral);
}
}
return args;
}
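/**
 * Creates an invocation of the given xml lang-lib function on the given expression. The expression
 * itself is prepended to the required args; the remaining args are passed as rest args.
 */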
private BLangInvocation createLanglibXMLInvocation(Location pos, String functionName,
BLangExpression invokeOnExpr,
ArrayList<BLangExpression> args,
ArrayList<BLangExpression> restArgs) {
invokeOnExpr = rewriteExpr(invokeOnExpr);
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
invocationNode.pos = pos;
BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode();
name.setLiteral(false);
name.setValue(functionName);
name.pos = pos;
invocationNode.name = name;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.expr = invokeOnExpr;
invocationNode.symbol = symResolver.lookupLangLibMethod(symTable.xmlType, names.fromString(functionName));
ArrayList<BLangExpression> requiredArgs = new ArrayList<>();
requiredArgs.add(invokeOnExpr);
requiredArgs.addAll(args);
invocationNode.requiredArgs = requiredArgs;
invocationNode.restArgs = rewriteExprs(restArgs);
invocationNode.type = ((BInvokableType) invocationNode.symbol.type).getReturnType();
invocationNode.langLibInvocation = true;
return invocationNode;
}
@Override
public void visit(BLangXMLNavigationAccess xmlNavigation) {
xmlNavigation.expr = rewriteExpr(xmlNavigation.expr);
xmlNavigation.childIndex = rewriteExpr(xmlNavigation.childIndex);
ArrayList<BLangExpression> filters = expandFilters(xmlNavigation.filters);
if (xmlNavigation.navAccessType == XMLNavigationAccess.NavAccessType.DESCENDANTS) {
BLangInvocation invocationNode = createLanglibXMLInvocation(xmlNavigation.pos,
XML_INTERNAL_SELECT_DESCENDANTS, xmlNavigation.expr, new ArrayList<>(), filters);
result = rewriteExpr(invocationNode);
} else if (xmlNavigation.navAccessType == XMLNavigationAccess.NavAccessType.CHILDREN) {
BLangInvocation invocationNode = createLanglibXMLInvocation(xmlNavigation.pos, XML_INTERNAL_CHILDREN,
xmlNavigation.expr, new ArrayList<>(), new ArrayList<>());
result = rewriteExpr(invocationNode);
} else {
BLangExpression childIndexExpr;
if (xmlNavigation.childIndex == null) {
childIndexExpr = new BLangLiteral(Long.valueOf(-1), symTable.intType);
} else {
childIndexExpr = xmlNavigation.childIndex;
}
ArrayList<BLangExpression> args = new ArrayList<>();
args.add(rewriteExpr(childIndexExpr));
BLangInvocation invocationNode = createLanglibXMLInvocation(xmlNavigation.pos,
XML_INTERNAL_GET_FILTERED_CHILDREN_FLAT, xmlNavigation.expr, args, filters);
result = rewriteExpr(invocationNode);
}
}
@Override
public void visit(BLangIsAssignableExpr assignableExpr) {
assignableExpr.lhsExpr = rewriteExpr(assignableExpr.lhsExpr);
result = assignableExpr;
}
@Override
public void visit(BFunctionPointerInvocation fpInvocation) {
result = fpInvocation;
}
@Override
public void visit(BLangTypedescExpr typedescExpr) {
typedescExpr.typeNode = rewrite(typedescExpr.typeNode, env);
result = typedescExpr;
}
@Override
public void visit(BLangIntRangeExpression intRangeExpression) {
if (!intRangeExpression.includeStart) {
intRangeExpression.startExpr = getModifiedIntRangeStartExpr(intRangeExpression.startExpr);
}
if (!intRangeExpression.includeEnd) {
intRangeExpression.endExpr = getModifiedIntRangeEndExpr(intRangeExpression.endExpr);
}
intRangeExpression.startExpr = rewriteExpr(intRangeExpression.startExpr);
intRangeExpression.endExpr = rewriteExpr(intRangeExpression.endExpr);
result = intRangeExpression;
}
@Override
public void visit(BLangRestArgsExpression bLangVarArgsExpression) {
result = rewriteExpr(bLangVarArgsExpression.expr);
}
@Override
public void visit(BLangNamedArgsExpression bLangNamedArgsExpression) {
bLangNamedArgsExpression.expr = rewriteExpr(bLangNamedArgsExpression.expr);
result = bLangNamedArgsExpression.expr;
}
@Override
public void visit(BLangMatchExpression bLangMatchExpression) {
addMatchExprDefaultCase(bLangMatchExpression);
String matchTempResultVarName = GEN_VAR_PREFIX.value + "temp_result";
BLangSimpleVariable tempResultVar =
ASTBuilderUtil.createVariable(bLangMatchExpression.pos, matchTempResultVarName,
bLangMatchExpression.type, null,
new BVarSymbol(0, names.fromString(matchTempResultVarName),
this.env.scope.owner.pkgID, bLangMatchExpression.type,
this.env.scope.owner, bLangMatchExpression.pos, VIRTUAL));
BLangSimpleVariableDef tempResultVarDef =
ASTBuilderUtil.createVariableDef(bLangMatchExpression.pos, tempResultVar);
tempResultVarDef.desugared = true;
BLangBlockStmt stmts = ASTBuilderUtil.createBlockStmt(bLangMatchExpression.pos, Lists.of(tempResultVarDef));
List<BLangMatchTypedBindingPatternClause> patternClauses = new ArrayList<>();
for (int i = 0; i < bLangMatchExpression.patternClauses.size(); i++) {
BLangMatchExprPatternClause pattern = bLangMatchExpression.patternClauses.get(i);
pattern.expr = rewriteExpr(pattern.expr);
BLangVariableReference tempResultVarRef =
ASTBuilderUtil.createVariableRef(bLangMatchExpression.pos, tempResultVar.symbol);
pattern.expr = addConversionExprIfRequired(pattern.expr, tempResultVarRef.type);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(pattern.pos, tempResultVarRef, pattern.expr);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(pattern.pos, Lists.of(assignmentStmt));
patternClauses.add(ASTBuilderUtil.createMatchStatementPattern(pattern.pos, pattern.variable, patternBody));
}
stmts.addStatement(ASTBuilderUtil.createMatchStatement(bLangMatchExpression.pos, bLangMatchExpression.expr,
patternClauses));
BLangVariableReference tempResultVarRef =
ASTBuilderUtil.createVariableRef(bLangMatchExpression.pos, tempResultVar.symbol);
BLangStatementExpression statementExpr = createStatementExpression(stmts, tempResultVarRef);
statementExpr.type = bLangMatchExpression.type;
result = rewriteExpr(statementExpr);
}
@Override
public void visit(BLangCheckedExpr checkedExpr) {
visitCheckAndCheckPanicExpr(checkedExpr, false);
}
@Override
public void visit(BLangCheckPanickedExpr checkedExpr) {
visitCheckAndCheckPanicExpr(checkedExpr, true);
}
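/**
 * Desugars check/checkpanic expressions into a temporary result variable plus a match statement with a
 * success pattern (assigning the value) and an error pattern (failing, returning, or panicking), all
 * wrapped in a statement expression.
 */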
private void visitCheckAndCheckPanicExpr(BLangCheckedExpr checkedExpr, boolean isCheckPanic) {
String checkedExprVarName = GEN_VAR_PREFIX.value;
BLangSimpleVariable checkedExprVar =
ASTBuilderUtil.createVariable(checkedExpr.pos, checkedExprVarName, checkedExpr.type, null,
new BVarSymbol(0, names.fromString(checkedExprVarName),
this.env.scope.owner.pkgID, checkedExpr.type,
this.env.scope.owner, checkedExpr.pos, VIRTUAL));
BLangSimpleVariableDef checkedExprVarDef = ASTBuilderUtil.createVariableDef(checkedExpr.pos, checkedExprVar);
checkedExprVarDef.desugared = true;
BLangMatchTypedBindingPatternClause patternSuccessCase =
getSafeAssignSuccessPattern(checkedExprVar.pos, checkedExprVar.symbol.type, true,
checkedExprVar.symbol, null);
BLangMatchTypedBindingPatternClause patternErrorCase =
getSafeAssignErrorPattern(checkedExpr.pos, this.env.scope.owner, checkedExpr.equivalentErrorTypeList,
isCheckPanic);
BLangMatch matchStmt = ASTBuilderUtil.createMatchStatement(checkedExpr.pos, checkedExpr.expr,
new ArrayList<BLangMatchTypedBindingPatternClause>() {{
add(patternSuccessCase);
add(patternErrorCase);
}});
BLangBlockStmt generatedStmtBlock = ASTBuilderUtil.createBlockStmt(checkedExpr.pos,
new ArrayList<BLangStatement>() {{
add(checkedExprVarDef);
add(matchStmt);
}});
BLangSimpleVarRef tempCheckedExprVarRef = ASTBuilderUtil.createVariableRef(
checkedExpr.pos, checkedExprVar.symbol);
BLangStatementExpression statementExpr = createStatementExpression(
generatedStmtBlock, tempCheckedExprVarRef);
statementExpr.type = checkedExpr.type;
result = rewriteExpr(statementExpr);
}
@Override
public void visit(BLangServiceConstructorExpr serviceConstructorExpr) {
final BLangTypeInit typeInit = ASTBuilderUtil.createEmptyTypeInit(serviceConstructorExpr.pos,
serviceConstructorExpr.serviceNode.serviceClass.symbol.type);
serviceConstructorExpr.serviceNode.annAttachments.forEach(attachment -> rewrite(attachment, env));
result = rewriteExpr(typeInit);
}
@Override
public void visit(BLangTypeTestExpr typeTestExpr) {
BLangExpression expr = typeTestExpr.expr;
if (types.isValueType(expr.type)) {
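// The return value is unused here: addConversionExprIfRequired sets expr.impConversionExpr as a
// side effect (via types.setImplicitCastExpr), and the rewriteExpr call below consumes it.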
addConversionExprIfRequired(expr, symTable.anyType);
}
typeTestExpr.expr = rewriteExpr(expr);
typeTestExpr.typeNode = rewrite(typeTestExpr.typeNode, env);
result = typeTestExpr;
}
@Override
public void visit(BLangAnnotAccessExpr annotAccessExpr) {
BLangBinaryExpr binaryExpr = (BLangBinaryExpr) TreeBuilder.createBinaryExpressionNode();
binaryExpr.pos = annotAccessExpr.pos;
binaryExpr.opKind = OperatorKind.ANNOT_ACCESS;
binaryExpr.lhsExpr = annotAccessExpr.expr;
binaryExpr.rhsExpr = ASTBuilderUtil.createLiteral(annotAccessExpr.pkgAlias.pos, symTable.stringType,
annotAccessExpr.annotationSymbol.bvmAlias());
binaryExpr.type = annotAccessExpr.type;
binaryExpr.opSymbol = new BOperatorSymbol(names.fromString(OperatorKind.ANNOT_ACCESS.value()), null,
new BInvokableType(Lists.of(binaryExpr.lhsExpr.type,
binaryExpr.rhsExpr.type),
annotAccessExpr.type, null), null,
symTable.builtinPos, VIRTUAL);
result = rewriteExpr(binaryExpr);
}
@Override
public void visit(BLangIsLikeExpr isLikeExpr) {
isLikeExpr.expr = rewriteExpr(isLikeExpr.expr);
result = isLikeExpr;
}
@Override
public void visit(BLangStatementExpression bLangStatementExpression) {
bLangStatementExpression.expr = rewriteExpr(bLangStatementExpression.expr);
bLangStatementExpression.stmt = rewrite(bLangStatementExpression.stmt, env);
result = bLangStatementExpression;
}
@Override
public void visit(BLangQueryExpr queryExpr) {
BLangStatementExpression stmtExpr = queryDesugar.desugar(queryExpr, env);
result = rewrite(stmtExpr, env);
}
@Override
public void visit(BLangQueryAction queryAction) {
BLangStatementExpression stmtExpr = queryDesugar.desugar(queryAction, env);
result = rewrite(stmtExpr, env);
}
@Override
public void visit(BLangJSONArrayLiteral jsonArrayLiteral) {
jsonArrayLiteral.exprs = rewriteExprs(jsonArrayLiteral.exprs);
result = jsonArrayLiteral;
}
@Override
public void visit(BLangConstant constant) {
BConstantSymbol constSymbol = constant.symbol;
if (constSymbol.literalType.tag <= TypeTags.BOOLEAN || constSymbol.literalType.tag == TypeTags.NIL) {
if (constSymbol.literalType.tag != TypeTags.NIL && constSymbol.value.value == null) {
throw new IllegalStateException();
}
BLangLiteral literal = ASTBuilderUtil.createLiteral(constant.expr.pos, constSymbol.literalType,
constSymbol.value.value);
constant.expr = rewriteExpr(literal);
} else {
constant.expr = rewriteExpr(constant.expr);
}
constant.annAttachments.forEach(attachment -> rewrite(attachment, env));
result = constant;
}
@Override
public void visit(BLangIgnoreExpr ignoreExpr) {
result = ignoreExpr;
}
@Override
public void visit(BLangDynamicParamExpr dynamicParamExpr) {
dynamicParamExpr.conditionalArgument = rewriteExpr(dynamicParamExpr.conditionalArgument);
dynamicParamExpr.condition = rewriteExpr(dynamicParamExpr.condition);
result = dynamicParamExpr;
}
@Override
public void visit(BLangConstRef constantRef) {
result = ASTBuilderUtil.createLiteral(constantRef.pos, constantRef.type, constantRef.value);
}
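/**
 * Defines a synthetic {@code $iterator$} variable initialized by invoking the iterator function on the
 * given collection reference.
 */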
BLangSimpleVariableDef getIteratorVariableDefinition(Location pos, BVarSymbol collectionSymbol,
BInvokableSymbol iteratorInvokableSymbol,
boolean isIteratorFuncFromLangLib) {
BLangSimpleVarRef dataReference = ASTBuilderUtil.createVariableRef(pos, collectionSymbol);
BLangInvocation iteratorInvocation = (BLangInvocation) TreeBuilder.createInvocationNode();
iteratorInvocation.pos = pos;
iteratorInvocation.expr = dataReference;
iteratorInvocation.symbol = iteratorInvokableSymbol;
iteratorInvocation.type = iteratorInvokableSymbol.retType;
iteratorInvocation.argExprs = Lists.of(dataReference);
iteratorInvocation.requiredArgs = iteratorInvocation.argExprs;
iteratorInvocation.langLibInvocation = isIteratorFuncFromLangLib;
BVarSymbol iteratorSymbol = new BVarSymbol(0, names.fromString("$iterator$"), this.env.scope.owner.pkgID,
iteratorInvokableSymbol.retType, this.env.scope.owner, pos, VIRTUAL);
BLangSimpleVariable iteratorVariable = ASTBuilderUtil.createVariable(pos, "$iterator$",
iteratorInvokableSymbol.retType, iteratorInvocation, iteratorSymbol);
return ASTBuilderUtil.createVariableDef(pos, iteratorVariable);
}
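/**
 * Defines a synthetic {@code $result$} variable initialized with the value of {@code $iterator$.next()}.
 */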
BLangSimpleVariableDef getIteratorNextVariableDefinition(Location pos, BType nillableResultType,
BVarSymbol iteratorSymbol,
BVarSymbol resultSymbol) {
BLangInvocation nextInvocation = createIteratorNextInvocation(pos, iteratorSymbol);
BLangSimpleVariable resultVariable = ASTBuilderUtil.createVariable(pos, "$result$",
nillableResultType, nextInvocation, resultSymbol);
return ASTBuilderUtil.createVariableDef(pos, resultVariable);
}
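/**
 * Creates an assignment of {@code $iterator$.next()} to the given result variable, stripping nil from
 * the iterator reference's type before the invocation.
 */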
BLangAssignment getIteratorNextAssignment(Location pos,
BVarSymbol iteratorSymbol, BVarSymbol resultSymbol) {
BLangSimpleVarRef resultReferenceInAssignment = ASTBuilderUtil.createVariableRef(pos, resultSymbol);
BLangInvocation nextInvocation = createIteratorNextInvocation(pos, iteratorSymbol);
nextInvocation.expr.type = types.getSafeType(nextInvocation.expr.type, true, false);
return ASTBuilderUtil.createAssignmentStmt(pos, resultReferenceInAssignment, nextInvocation, false);
}
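/**
 * Builds the {@code next()} invocation on the given iterator symbol, resolving the attached function on
 * the iterator's object type.
 */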
BLangInvocation createIteratorNextInvocation(Location pos, BVarSymbol iteratorSymbol) {
BLangIdentifier nextIdentifier = ASTBuilderUtil.createIdentifier(pos, "next");
BLangSimpleVarRef iteratorReferenceInNext = ASTBuilderUtil.createVariableRef(pos, iteratorSymbol);
BInvokableSymbol nextFuncSymbol = getNextFunc((BObjectType) iteratorSymbol.type).symbol;
BLangInvocation nextInvocation = (BLangInvocation) TreeBuilder.createInvocationNode();
nextInvocation.pos = pos;
nextInvocation.name = nextIdentifier;
nextInvocation.expr = iteratorReferenceInNext;
nextInvocation.requiredArgs = Lists.of(ASTBuilderUtil.createVariableRef(pos, iteratorSymbol));
nextInvocation.argExprs = nextInvocation.requiredArgs;
nextInvocation.symbol = nextFuncSymbol;
nextInvocation.type = nextFuncSymbol.retType;
return nextInvocation;
}
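// Returns the attached next() function of the given iterator object type, or null if it has none.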
private BAttachedFunction getNextFunc(BObjectType iteratorType) {
BObjectTypeSymbol iteratorSymbol = (BObjectTypeSymbol) iteratorType.tsymbol;
for (BAttachedFunction bAttachedFunction : iteratorSymbol.attachedFuncs) {
if (bAttachedFunction.funcName.value.equals("next")) {
return bAttachedFunction;
}
}
return null;
}
BLangFieldBasedAccess getValueAccessExpression(Location location, BType varType,
BVarSymbol resultSymbol) {
return getFieldAccessExpression(location, "value", varType, resultSymbol);
}
BLangFieldBasedAccess getFieldAccessExpression(Location pos, String fieldName, BType varType,
BVarSymbol resultSymbol) {
BLangSimpleVarRef resultReferenceInVariableDef = ASTBuilderUtil.createVariableRef(pos, resultSymbol);
BLangIdentifier valueIdentifier = ASTBuilderUtil.createIdentifier(pos, fieldName);
BLangFieldBasedAccess fieldBasedAccessExpression =
ASTBuilderUtil.createFieldAccessExpr(resultReferenceInVariableDef, valueIdentifier);
fieldBasedAccessExpression.pos = pos;
fieldBasedAccessExpression.type = varType;
fieldBasedAccessExpression.originalType = fieldBasedAccessExpression.type;
return fieldBasedAccessExpression;
}
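// Wraps the expression body of an arrow function in a block function body with a single return statement.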
private BlockFunctionBodyNode populateArrowExprBodyBlock(BLangArrowFunction bLangArrowFunction) {
BlockFunctionBodyNode blockNode = TreeBuilder.createBlockFunctionBodyNode();
BLangReturn returnNode = (BLangReturn) TreeBuilder.createReturnNode();
returnNode.pos = bLangArrowFunction.body.expr.pos;
returnNode.setExpression(bLangArrowFunction.body.expr);
blockNode.addStatement(returnNode);
return blockNode;
}
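// Creates an invocation of a function resolved by name from the root scope, with the given args and
// return type.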
private BLangInvocation createInvocationNode(String functionName, List<BLangExpression> args, BType retType) {
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode();
name.setLiteral(false);
name.setValue(functionName);
invocationNode.name = name;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.symbol = symTable.rootScope.lookup(new Name(functionName)).symbol;
invocationNode.type = retType;
invocationNode.requiredArgs = args;
return invocationNode;
}
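/**
 * Creates a lang-lib method invocation on the given receiver expression; the receiver is prepended to
 * the required args. When {@code retType} is null, the resolved symbol's return type is used.
 */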
private BLangInvocation createLangLibInvocationNode(String functionName,
BLangExpression onExpr,
List<BLangExpression> args,
BType retType,
Location pos) {
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
invocationNode.pos = pos;
BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode();
name.setLiteral(false);
name.setValue(functionName);
name.pos = pos;
invocationNode.name = name;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.expr = onExpr;
invocationNode.symbol = symResolver.lookupLangLibMethod(onExpr.type, names.fromString(functionName));
ArrayList<BLangExpression> requiredArgs = new ArrayList<>();
requiredArgs.add(onExpr);
requiredArgs.addAll(args);
invocationNode.requiredArgs = requiredArgs;
invocationNode.type = retType != null ? retType : ((BInvokableSymbol) invocationNode.symbol).retType;
invocationNode.langLibInvocation = true;
return invocationNode;
}
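/**
 * Creates an invocation of a function from the internal lang module with the given args. When
 * {@code retType} is null, the resolved symbol's return type is used.
 */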
private BLangInvocation createLangLibInvocationNode(String functionName,
List<BLangExpression> args,
BType retType,
Location pos) {
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
invocationNode.pos = pos;
BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode();
name.setLiteral(false);
name.setValue(functionName);
name.pos = pos;
invocationNode.name = name;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.symbol = symResolver.lookupMethodInModule(symTable.langInternalModuleSymbol,
names.fromString(functionName), env);
ArrayList<BLangExpression> requiredArgs = new ArrayList<>();
requiredArgs.addAll(args);
invocationNode.requiredArgs = requiredArgs;
invocationNode.type = retType != null ? retType : ((BInvokableSymbol) invocationNode.symbol).retType;
invocationNode.langLibInvocation = true;
return invocationNode;
}
private BLangArrayLiteral createArrayLiteralExprNode() {
BLangArrayLiteral expr = (BLangArrayLiteral) TreeBuilder.createArrayLiteralExpressionNode();
expr.exprs = new ArrayList<>();
expr.type = new BArrayType(symTable.anyType);
return expr;
}
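// Rewrites an invocation through a function pointer: the callee is re-expressed as a variable reference
// (or a field access for object-bound pointers) and wrapped in a BFunctionPointerInvocation.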
private void visitFunctionPointerInvocation(BLangInvocation iExpr) {
BLangAccessExpression expr;
if (iExpr.expr == null) {
expr = new BLangSimpleVarRef();
} else {
BLangFieldBasedAccess fieldBasedAccess = new BLangFieldBasedAccess();
fieldBasedAccess.expr = iExpr.expr;
fieldBasedAccess.field = iExpr.name;
expr = fieldBasedAccess;
}
expr.symbol = iExpr.symbol;
expr.type = iExpr.symbol.type;
BLangExpression rewritten = rewriteExpr(expr);
result = new BFunctionPointerInvocation(iExpr, rewritten);
}
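// Wraps the expression in a clone() lang-lib call unless its type is a value type or error. Used by the
// worker send desugaring so that values crossing worker boundaries are copied rather than shared.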
private BLangExpression visitCloneInvocation(BLangExpression expr, BType lhsType) {
if (types.isValueType(expr.type)) {
return expr;
}
if (expr.type.tag == TypeTags.ERROR) {
return expr;
}
BLangInvocation cloneInvok = createLangLibInvocationNode("clone", expr, new ArrayList<>(), null, expr.pos);
return addConversionExprIfRequired(cloneInvok, lhsType);
}
private BLangExpression visitCloneReadonly(BLangExpression expr, BType lhsType) {
if (types.isValueType(expr.type)) {
return expr;
}
if (expr.type.tag == TypeTags.ERROR) {
return expr;
}
BLangInvocation cloneInvok = createLangLibInvocationNode("cloneReadOnly", expr, new ArrayList<>(), expr.type,
expr.pos);
return addConversionExprIfRequired(cloneInvok, lhsType);
}
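/**
 * Central rewrite driver: dispatches the node to its visit method under the given env and returns the
 * desugared result. Nodes already marked as desugared are returned as-is.
 */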
@SuppressWarnings("unchecked")
<E extends BLangNode> E rewrite(E node, SymbolEnv env) {
if (node == null) {
return null;
}
if (node.desugared) {
return node;
}
SymbolEnv previousEnv = this.env;
this.env = env;
node.accept(this);
BLangNode resultNode = this.result;
this.result = null;
resultNode.desugared = true;
this.env = previousEnv;
return (E) resultNode;
}
@SuppressWarnings("unchecked")
<E extends BLangExpression> E rewriteExpr(E node) {
if (node == null) {
return null;
}
if (node.desugared) {
return node;
}
BLangExpression expr = node;
if (node.impConversionExpr != null) {
expr = node.impConversionExpr;
node.impConversionExpr = null;
}
expr.accept(this);
BLangNode resultNode = this.result;
this.result = null;
resultNode.desugared = true;
return (E) resultNode;
}
@SuppressWarnings("unchecked")
<E extends BLangStatement> E rewrite(E statement, SymbolEnv env) {
if (statement == null) {
return null;
}
BLangStatementLink link = new BLangStatementLink();
link.parent = currentLink;
currentLink = link;
BLangStatement stmt = (BLangStatement) rewrite((BLangNode) statement, env);
link.statement = stmt;
stmt.statementLink = link;
currentLink = link.parent;
return (E) stmt;
}
private <E extends BLangStatement> List<E> rewriteStmt(List<E> nodeList, SymbolEnv env) {
for (int i = 0; i < nodeList.size(); i++) {
nodeList.set(i, rewrite(nodeList.get(i), env));
}
return nodeList;
}
private <E extends BLangNode> List<E> rewrite(List<E> nodeList, SymbolEnv env) {
for (int i = 0; i < nodeList.size(); i++) {
nodeList.set(i, rewrite(nodeList.get(i), env));
}
return nodeList;
}
private <E extends BLangExpression> List<E> rewriteExprs(List<E> nodeList) {
for (int i = 0; i < nodeList.size(); i++) {
nodeList.set(i, rewriteExpr(nodeList.get(i)));
}
return nodeList;
}
private BLangLiteral createStringLiteral(Location pos, String value) {
BLangLiteral stringLit = new BLangLiteral(value, symTable.stringType);
stringLit.pos = pos;
return stringLit;
}
private BLangLiteral createIntLiteral(long value) {
BLangLiteral literal = (BLangLiteral) TreeBuilder.createLiteralExpression();
literal.value = value;
literal.type = symTable.intType;
return literal;
}
private BLangLiteral createByteLiteral(Location pos, Byte value) {
BLangLiteral byteLiteral = new BLangLiteral(Byte.toUnsignedInt(value), symTable.byteType);
byteLiteral.pos = pos;
return byteLiteral;
}
private BLangExpression createTypeCastExpr(BLangExpression expr, BType targetType) {
BLangTypeConversionExpr conversionExpr = (BLangTypeConversionExpr) TreeBuilder.createTypeConversionNode();
conversionExpr.pos = expr.pos;
conversionExpr.expr = expr;
conversionExpr.type = targetType;
conversionExpr.targetType = targetType;
conversionExpr.internal = true;
return conversionExpr;
}
private BType getElementType(BType type) {
if (type.tag != TypeTags.ARRAY) {
return type;
}
return getElementType(((BArrayType) type).getElementType());
}
private void addReturnIfNotPresent(BLangInvokableNode invokableNode) {
if (Symbols.isNative(invokableNode.symbol) ||
(invokableNode.hasBody() && invokableNode.body.getKind() != NodeKind.BLOCK_FUNCTION_BODY)) {
return;
}
BLangBlockFunctionBody funcBody = (BLangBlockFunctionBody) invokableNode.body;
boolean isNeverOrNilableReturn = invokableNode.symbol.type.getReturnType().tag == TypeTags.NEVER ||
invokableNode.symbol.type.getReturnType().isNullable();
if (invokableNode.workers.size() == 0 && isNeverOrNilableReturn && (funcBody.stmts.size() < 1 ||
funcBody.stmts.get(funcBody.stmts.size() - 1).getKind() != NodeKind.RETURN)) {
Location invPos = invokableNode.pos;
Location returnStmtPos = new BLangDiagnosticLocation(invPos.lineRange().filePath(),
invPos.lineRange().endLine().line(),
invPos.lineRange().endLine().line(),
invPos.lineRange().startLine().offset(),
invPos.lineRange().startLine().offset());
BLangReturn returnStmt = ASTBuilderUtil.createNilReturnStmt(returnStmtPos, symTable.nilType);
funcBody.addStatement(returnStmt);
}
}
/**
 * Reorders the invocation arguments to match the parameter order of the original function signature,
 * packing rest arguments and materializing defaultable and included-record parameters as needed.
 *
 * @param iExpr Function invocation expression whose arguments are to be reordered
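*
* For example, given a hypothetical signature {@code function foo(int a, int b = 5)}, the call
* {@code foo(b = 7, a = 1)} is reordered to the positional form {@code foo(1, 7)}.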
*/
private void reorderArguments(BLangInvocation iExpr) {
BSymbol symbol = iExpr.symbol;
if (symbol == null || symbol.type.tag != TypeTags.INVOKABLE) {
return;
}
BInvokableSymbol invokableSymbol = (BInvokableSymbol) symbol;
List<BLangExpression> restArgs = iExpr.restArgs;
int originalRequiredArgCount = iExpr.requiredArgs.size();
BLangSimpleVarRef varargRef = null;
BLangBlockStmt blockStmt = null;
BType varargVarType = null;
int restArgCount = restArgs.size();
if (restArgCount > 0 &&
restArgs.get(restArgCount - 1).getKind() == NodeKind.REST_ARGS_EXPR &&
originalRequiredArgCount < invokableSymbol.params.size()) {
BLangExpression expr = ((BLangRestArgsExpression) restArgs.get(restArgCount - 1)).expr;
Location varargExpPos = expr.pos;
varargVarType = expr.type;
String varargVarName = DESUGARED_VARARG_KEY + UNDERSCORE + this.varargCount++;
BVarSymbol varargVarSymbol = new BVarSymbol(0, names.fromString(varargVarName), this.env.scope.owner.pkgID,
varargVarType, this.env.scope.owner, varargExpPos, VIRTUAL);
varargRef = ASTBuilderUtil.createVariableRef(varargExpPos, varargVarSymbol);
BLangSimpleVariable var = createVariable(varargExpPos, varargVarName, varargVarType, expr, varargVarSymbol);
BLangSimpleVariableDef varDef = ASTBuilderUtil.createVariableDef(varargExpPos);
varDef.var = var;
varDef.type = varargVarType;
blockStmt = createBlockStmt(varargExpPos);
blockStmt.stmts.add(varDef);
}
if (!invokableSymbol.params.isEmpty()) {
reorderNamedArgs(iExpr, invokableSymbol, varargRef);
}
if (restArgCount == 0 || restArgs.get(restArgCount - 1).getKind() != NodeKind.REST_ARGS_EXPR) {
if (invokableSymbol.restParam == null) {
return;
}
BLangArrayLiteral arrayLiteral = (BLangArrayLiteral) TreeBuilder.createArrayLiteralExpressionNode();
List<BLangExpression> exprs = new ArrayList<>();
BArrayType arrayType = (BArrayType) invokableSymbol.restParam.type;
BType elemType = arrayType.eType;
for (BLangExpression restArg : restArgs) {
exprs.add(addConversionExprIfRequired(restArg, elemType));
}
arrayLiteral.exprs = exprs;
arrayLiteral.type = arrayType;
if (restArgCount != 0) {
iExpr.restArgs = new ArrayList<>();
}
iExpr.restArgs.add(arrayLiteral);
return;
}
if (restArgCount == 1 && restArgs.get(0).getKind() == NodeKind.REST_ARGS_EXPR) {
if (iExpr.requiredArgs.size() == originalRequiredArgCount) {
return;
}
BLangExpression firstNonRestArg = iExpr.requiredArgs.remove(0);
BLangStatementExpression stmtExpression = createStatementExpression(blockStmt, firstNonRestArg);
stmtExpression.type = firstNonRestArg.type;
iExpr.requiredArgs.add(0, stmtExpression);
if (invokableSymbol.restParam == null) {
restArgs.remove(0);
return;
}
BLangRestArgsExpression restArgsExpression = (BLangRestArgsExpression) restArgs.remove(0);
BArrayType restParamType = (BArrayType) invokableSymbol.restParam.type;
if (restArgsExpression.type.tag == TypeTags.RECORD) {
BLangExpression expr = new BLangIgnoreExpr();
expr.type = restParamType;
restArgs.add(expr);
return;
}
Location pos = restArgsExpression.pos;
BLangArrayLiteral newArrayLiteral = createArrayLiteralExprNode();
newArrayLiteral.type = restParamType;
String name = DESUGARED_VARARG_KEY + UNDERSCORE + this.varargCount++;
BVarSymbol varSymbol = new BVarSymbol(0, names.fromString(name), this.env.scope.owner.pkgID,
restParamType, this.env.scope.owner, pos, VIRTUAL);
BLangSimpleVarRef arrayVarRef = ASTBuilderUtil.createVariableRef(pos, varSymbol);
BLangSimpleVariable var = createVariable(pos, name, restParamType, newArrayLiteral, varSymbol);
BLangSimpleVariableDef varDef = ASTBuilderUtil.createVariableDef(pos);
varDef.var = var;
varDef.type = restParamType;
BLangLiteral startIndex = createIntLiteral(invokableSymbol.params.size() - originalRequiredArgCount);
BLangInvocation lengthInvocation = createLengthInvocation(pos, varargRef);
BLangInvocation intRangeInvocation = replaceWithIntRange(pos, startIndex,
getModifiedIntRangeEndExpr(lengthInvocation));
BLangForeach foreach = (BLangForeach) TreeBuilder.createForeachNode();
foreach.pos = pos;
foreach.collection = intRangeInvocation;
types.setForeachTypedBindingPatternType(foreach);
final BLangSimpleVariable foreachVariable = ASTBuilderUtil.createVariable(pos, "$foreach$i",
foreach.varType);
foreachVariable.symbol = new BVarSymbol(0, names.fromIdNode(foreachVariable.name),
this.env.scope.owner.pkgID, foreachVariable.type,
this.env.scope.owner, pos, VIRTUAL);
BLangSimpleVarRef foreachVarRef = ASTBuilderUtil.createVariableRef(pos, foreachVariable.symbol);
foreach.variableDefinitionNode = ASTBuilderUtil.createVariableDef(pos, foreachVariable);
foreach.isDeclaredWithVar = true;
BLangBlockStmt foreachBody = ASTBuilderUtil.createBlockStmt(pos);
BLangIndexBasedAccess valueExpr = ASTBuilderUtil.createIndexAccessExpr(varargRef, foreachVarRef);
valueExpr.type = varargVarType.tag == TypeTags.ARRAY ? ((BArrayType) varargVarType).eType :
symTable.anyType;
BLangExpression pushExpr = addConversionExprIfRequired(valueExpr, restParamType.eType);
BLangExpressionStmt expressionStmt = createExpressionStmt(pos, foreachBody);
BLangInvocation pushInvocation = createLangLibInvocationNode(PUSH_LANGLIB_METHOD, arrayVarRef,
new ArrayList<BLangExpression>() {{
add(pushExpr);
}}, restParamType, pos);
pushInvocation.restArgs.add(pushInvocation.requiredArgs.remove(1));
expressionStmt.expr = pushInvocation;
foreach.body = foreachBody;
BLangBlockStmt newArrayBlockStmt = createBlockStmt(pos);
newArrayBlockStmt.addStatement(varDef);
newArrayBlockStmt.addStatement(foreach);
BLangStatementExpression newArrayStmtExpression = createStatementExpression(newArrayBlockStmt, arrayVarRef);
newArrayStmtExpression.type = restParamType;
restArgs.add(addConversionExprIfRequired(newArrayStmtExpression, restParamType));
return;
}
BArrayType restParamType = (BArrayType) invokableSymbol.restParam.type;
BLangArrayLiteral arrayLiteral = (BLangArrayLiteral) TreeBuilder.createArrayLiteralExpressionNode();
arrayLiteral.type = restParamType;
BType elemType = restParamType.eType;
Location pos = restArgs.get(0).pos;
List<BLangExpression> exprs = new ArrayList<>();
for (int i = 0; i < restArgCount - 1; i++) {
exprs.add(addConversionExprIfRequired(restArgs.get(i), elemType));
}
arrayLiteral.exprs = exprs;
BLangRestArgsExpression pushRestArgsExpr = (BLangRestArgsExpression) TreeBuilder.createVarArgsNode();
pushRestArgsExpr.pos = pos;
pushRestArgsExpr.expr = restArgs.remove(restArgCount - 1);
String name = DESUGARED_VARARG_KEY + UNDERSCORE + this.varargCount++;
BVarSymbol varSymbol = new BVarSymbol(0, names.fromString(name), this.env.scope.owner.pkgID, restParamType,
this.env.scope.owner, pos, VIRTUAL);
BLangSimpleVarRef arrayVarRef = ASTBuilderUtil.createVariableRef(pos, varSymbol);
BLangSimpleVariable var = createVariable(pos, name, restParamType, arrayLiteral, varSymbol);
BLangSimpleVariableDef varDef = ASTBuilderUtil.createVariableDef(pos);
varDef.var = var;
varDef.type = restParamType;
BLangBlockStmt pushBlockStmt = createBlockStmt(pos);
pushBlockStmt.stmts.add(varDef);
BLangExpressionStmt expressionStmt = createExpressionStmt(pos, pushBlockStmt);
BLangInvocation pushInvocation = createLangLibInvocationNode(PUSH_LANGLIB_METHOD, arrayVarRef,
new ArrayList<BLangExpression>() {{
add(pushRestArgsExpr);
}}, restParamType, pos);
pushInvocation.restArgs.add(pushInvocation.requiredArgs.remove(1));
expressionStmt.expr = pushInvocation;
BLangStatementExpression stmtExpression = createStatementExpression(pushBlockStmt, arrayVarRef);
stmtExpression.type = restParamType;
iExpr.restArgs = new ArrayList<BLangExpression>(1) {{ add(stmtExpression); }};
}
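/**
 * Reorders named arguments into positional order based on the parameter list. Missing defaultable params
 * become BLangIgnoreExpr placeholders (or values projected from a record/tuple vararg), and included
 * record params are materialized as record literals that absorb the matching named args.
 */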
private void reorderNamedArgs(BLangInvocation iExpr, BInvokableSymbol invokableSymbol, BLangExpression varargRef) {
List<BLangExpression> args = new ArrayList<>();
Map<String, BLangExpression> namedArgs = new HashMap<>();
iExpr.requiredArgs.stream()
.filter(expr -> expr.getKind() == NodeKind.NAMED_ARGS_EXPR)
.forEach(expr -> namedArgs.put(((NamedArgNode) expr).getName().value, expr));
List<BVarSymbol> params = invokableSymbol.params;
List<BLangRecordLiteral> incRecordLiterals = new ArrayList<>();
BLangRecordLiteral incRecordParamAllowAdditionalFields = null;
int varargIndex = 0;
BType varargType = null;
boolean tupleTypedVararg = false;
if (varargRef != null) {
varargType = varargRef.type;
tupleTypedVararg = varargType.tag == TypeTags.TUPLE;
}
for (int i = 0; i < params.size(); i++) {
BVarSymbol param = params.get(i);
if (iExpr.requiredArgs.size() > i && iExpr.requiredArgs.get(i).getKind() != NodeKind.NAMED_ARGS_EXPR) {
args.add(iExpr.requiredArgs.get(i));
} else if (namedArgs.containsKey(param.name.value)) {
args.add(namedArgs.remove(param.name.value));
} else if (param.getFlags().contains(Flag.INCLUDED)) {
BLangRecordLiteral recordLiteral = (BLangRecordLiteral) TreeBuilder.createRecordLiteralNode();
BType paramType = param.type;
recordLiteral.type = paramType;
args.add(recordLiteral);
incRecordLiterals.add(recordLiteral);
if (((BRecordType) paramType).restFieldType != symTable.noType) {
incRecordParamAllowAdditionalFields = recordLiteral;
}
} else if (varargRef == null) {
BLangExpression expr = new BLangIgnoreExpr();
expr.type = param.type;
args.add(expr);
} else {
BLangExpression indexExpr;
if (varargRef.type.tag == TypeTags.RECORD) {
if (param.defaultableParam) {
BLangInvocation hasKeyInvocation = createLangLibInvocationNode(HAS_KEY, varargRef,
List.of(createStringLiteral(param.pos, param.name.value)), null, varargRef.pos);
indexExpr = rewriteExpr(createStringLiteral(param.pos, param.name.value));
BLangIndexBasedAccess memberAccessExpr = createMemberAccessExprNode(param.type,
varargRef, indexExpr, varargRef.pos);
BLangExpression ignoreExpr = createIgnoreExprNode(param.type);
BLangTernaryExpr ternaryExpr = createTernaryExprNode(param.type, hasKeyInvocation,
memberAccessExpr, ignoreExpr, varargRef.pos);
args.add(createDynamicParamExpression(hasKeyInvocation, ternaryExpr));
} else {
BLangFieldBasedAccess fieldBasedAccessExpression = ASTBuilderUtil.createFieldAccessExpr(
varargRef, ASTBuilderUtil.createIdentifier(param.pos, param.name.value));
fieldBasedAccessExpression.type = param.type;
args.add(fieldBasedAccessExpression);
}
} else {
indexExpr = rewriteExpr(createIntLiteral(varargIndex));
BType memberAccessExprType = tupleTypedVararg ?
((BTupleType) varargType).tupleTypes.get(varargIndex) : ((BArrayType) varargType).eType;
args.add(addConversionExprIfRequired(createMemberAccessExprNode(memberAccessExprType, varargRef,
indexExpr, varargRef.pos), param.type));
varargIndex++;
}
}
}
if (namedArgs.size() > 0) {
setFieldsForIncRecordLiterals(namedArgs, incRecordLiterals, incRecordParamAllowAdditionalFields);
}
iExpr.requiredArgs = args;
}
private BLangDynamicParamExpr createDynamicParamExpression(BLangExpression condition,
BLangExpression conditionalArg) {
BLangDynamicParamExpr dynamicParamExpr = new BLangDynamicParamExpr();
dynamicParamExpr.condition = condition;
dynamicParamExpr.conditionalArgument = conditionalArg;
return dynamicParamExpr;
}
private BLangTernaryExpr createTernaryExprNode(BType type, BLangExpression expr, BLangExpression thenExpr,
BLangExpression elseExpr, Location pos) {
BLangTernaryExpr ternaryExpr = (BLangTernaryExpr) TreeBuilder.createTernaryExpressionNode();
ternaryExpr.pos = pos;
ternaryExpr.elseExpr = elseExpr;
ternaryExpr.thenExpr = thenExpr;
ternaryExpr.expr = expr;
ternaryExpr.type = type;
return ternaryExpr;
}
private BLangIndexBasedAccess createMemberAccessExprNode(BType type, BLangExpression expr,
BLangExpression indexExpr, Location pos) {
BLangIndexBasedAccess memberAccessExpr = (BLangIndexBasedAccess) TreeBuilder.createIndexBasedAccessNode();
memberAccessExpr.pos = pos;
memberAccessExpr.expr = expr;
memberAccessExpr.indexExpr = indexExpr;
memberAccessExpr.type = type;
return memberAccessExpr;
}
private BLangExpression createIgnoreExprNode(BType type) {
BLangExpression ignoreExpr = new BLangIgnoreExpr();
ignoreExpr.type = type;
return ignoreExpr;
}
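// Distributes leftover named args among the included-record parameter literals; an arg matching no
// declared field goes to the record literal whose type allows additional fields.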
private void setFieldsForIncRecordLiterals(Map<String, BLangExpression> namedArgs,
List<BLangRecordLiteral> incRecordLiterals,
BLangRecordLiteral incRecordParamAllowAdditionalFields) {
for (String name : namedArgs.keySet()) {
boolean isAdditionalField = true;
BLangNamedArgsExpression expr = (BLangNamedArgsExpression) namedArgs.get(name);
for (BLangRecordLiteral recordLiteral : incRecordLiterals) {
LinkedHashMap<String, BField> fields = ((BRecordType) recordLiteral.type).fields;
if (fields.containsKey(name) && fields.get(name).type.tag != TypeTags.NEVER) {
isAdditionalField = false;
createAndAddRecordFieldForIncRecordLiteral(recordLiteral, expr);
break;
}
}
if (isAdditionalField) {
createAndAddRecordFieldForIncRecordLiteral(incRecordParamAllowAdditionalFields, expr);
}
}
}
private void createAndAddRecordFieldForIncRecordLiteral(BLangRecordLiteral recordLiteral,
BLangNamedArgsExpression expr) {
BLangSimpleVarRef varRef = new BLangSimpleVarRef();
varRef.variableName = expr.name;
BLangRecordLiteral.BLangRecordKeyValueField recordKeyValueField = ASTBuilderUtil.
createBLangRecordKeyValue(varRef, expr.expr);
recordLiteral.fields.add(recordKeyValueField);
}
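/**
 * Builds the error-pattern clause used when desugaring check expressions: the matched error is either
 * re-thrown via a fail statement (optionally returning it when the enclosing function can return the
 * error) or, for checkpanic, panicked.
 */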
private BLangMatchTypedBindingPatternClause getSafeAssignErrorPattern(Location location,
BSymbol invokableSymbol,
List<BType> equivalentErrorTypes,
boolean isCheckPanicExpr) {
BType enclosingFuncReturnType = ((BInvokableType) invokableSymbol.type).retType;
Set<BType> returnTypeSet = enclosingFuncReturnType.tag == TypeTags.UNION ?
((BUnionType) enclosingFuncReturnType).getMemberTypes() :
new LinkedHashSet<BType>() {{
add(enclosingFuncReturnType);
}};
boolean returnOnError = equivalentErrorTypes.stream()
.allMatch(errorType -> returnTypeSet.stream()
.anyMatch(retType -> types.isAssignable(errorType, retType)));
String patternFailureCaseVarName = GEN_VAR_PREFIX.value + "t_failure";
BLangSimpleVariable patternFailureCaseVar =
ASTBuilderUtil.createVariable(location, patternFailureCaseVarName, symTable.errorType, null,
new BVarSymbol(0, names.fromString(patternFailureCaseVarName),
this.env.scope.owner.pkgID, symTable.errorType,
this.env.scope.owner, location, VIRTUAL));
BLangVariableReference patternFailureCaseVarRef =
ASTBuilderUtil.createVariableRef(location, patternFailureCaseVar.symbol);
BLangBlockStmt patternBlockFailureCase = (BLangBlockStmt) TreeBuilder.createBlockNode();
patternBlockFailureCase.pos = location;
if (!isCheckPanicExpr && (returnOnError || this.onFailClause != null)) {
BLangFail failStmt = (BLangFail) TreeBuilder.createFailNode();
failStmt.pos = location;
failStmt.expr = patternFailureCaseVarRef;
patternBlockFailureCase.stmts.add(failStmt);
if (returnOnError && this.shouldReturnErrors) {
BLangReturn errorReturn = ASTBuilderUtil.createReturnStmt(location,
rewrite(patternFailureCaseVarRef, env));
errorReturn.desugared = true;
failStmt.exprStmt = errorReturn;
}
} else {
BLangPanic panicNode = (BLangPanic) TreeBuilder.createPanicNode();
panicNode.pos = location;
panicNode.expr = patternFailureCaseVarRef;
patternBlockFailureCase.stmts.add(panicNode);
}
return ASTBuilderUtil.createMatchStatementPattern(location, patternFailureCaseVar, patternBlockFailureCase);
}
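// Builds the success-pattern clause: the matched value is assigned to the target variable or expression.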
private BLangMatchTypedBindingPatternClause getSafeAssignSuccessPattern(Location location,
BType lhsType, boolean isVarDef, BVarSymbol varSymbol, BLangExpression lhsExpr) {
String patternSuccessCaseVarName = GEN_VAR_PREFIX.value + "t_match";
BLangSimpleVariable patternSuccessCaseVar =
ASTBuilderUtil.createVariable(location, patternSuccessCaseVarName, lhsType, null,
new BVarSymbol(0, names.fromString(patternSuccessCaseVarName),
this.env.scope.owner.pkgID, lhsType,
this.env.scope.owner, location, VIRTUAL));
BLangExpression varRefExpr;
if (isVarDef) {
varRefExpr = ASTBuilderUtil.createVariableRef(location, varSymbol);
} else {
varRefExpr = lhsExpr;
}
BLangVariableReference patternSuccessCaseVarRef = ASTBuilderUtil.createVariableRef(location,
patternSuccessCaseVar.symbol);
BLangAssignment assignmentStmtSuccessCase = ASTBuilderUtil.createAssignmentStmt(location,
varRefExpr, patternSuccessCaseVarRef, false);
BLangBlockStmt patternBlockSuccessCase = ASTBuilderUtil.createBlockStmt(location,
new ArrayList<BLangStatement>() {{
add(assignmentStmtSuccessCase);
}});
return ASTBuilderUtil.createMatchStatementPattern(location,
patternSuccessCaseVar, patternBlockSuccessCase);
}
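// Chains the per-pattern if statements of a match statement into a single if-else ladder.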
private BLangStatement generateIfElseStmt(BLangMatch matchStmt, BLangSimpleVariable matchExprVar) {
List<BLangMatchBindingPatternClause> patterns = matchStmt.patternClauses;
BLangIf parentIfNode = generateIfElseStmt(patterns.get(0), matchExprVar);
BLangIf currentIfNode = parentIfNode;
for (int i = 1; i < patterns.size(); i++) {
BLangMatchBindingPatternClause patternClause = patterns.get(i);
if (i == patterns.size() - 1 && patternClause.isLastPattern) {
currentIfNode.elseStmt = getMatchPatternElseBody(patternClause, matchExprVar);
} else {
currentIfNode.elseStmt = generateIfElseStmt(patternClause, matchExprVar);
currentIfNode = (BLangIf) currentIfNode.elseStmt;
}
}
return parentIfNode;
}
/**
* Generate an if statement for a single pattern clause of a match statement.
*
* @param pattern match binding pattern clause node
* @param matchExprVar variable node holding the matched expression's value
* @return if-else statement node
*/
private BLangIf generateIfElseStmt(BLangMatchBindingPatternClause pattern, BLangSimpleVariable matchExprVar) {
BLangExpression ifCondition = createPatternIfCondition(pattern, matchExprVar.symbol);
if (NodeKind.MATCH_TYPED_PATTERN_CLAUSE == pattern.getKind()) {
BLangBlockStmt patternBody = getMatchPatternBody(pattern, matchExprVar);
return ASTBuilderUtil.createIfElseStmt(pattern.pos, ifCondition, patternBody, null);
}
BType expectedType = matchExprVar.type;
if (pattern.getKind() == NodeKind.MATCH_STRUCTURED_PATTERN_CLAUSE) {
BLangMatchStructuredBindingPatternClause matchPattern = (BLangMatchStructuredBindingPatternClause) pattern;
expectedType = getStructuredBindingPatternType(matchPattern.bindingPatternVariable);
}
if (NodeKind.MATCH_STRUCTURED_PATTERN_CLAUSE == pattern.getKind()) {
BLangMatchStructuredBindingPatternClause structuredPattern =
(BLangMatchStructuredBindingPatternClause) pattern;
BLangSimpleVariableDef varDef = forceCastIfApplicable(matchExprVar.symbol, pattern.pos, expectedType);
BLangSimpleVarRef matchExprVarRef = ASTBuilderUtil.createVariableRef(pattern.pos, varDef.var.symbol);
structuredPattern.bindingPatternVariable.expr = matchExprVarRef;
BLangStatement varDefStmt;
if (NodeKind.TUPLE_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createTupleVariableDef(pattern.pos,
(BLangTupleVariable) structuredPattern.bindingPatternVariable);
} else if (NodeKind.RECORD_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createRecordVariableDef(pattern.pos,
(BLangRecordVariable) structuredPattern.bindingPatternVariable);
} else if (NodeKind.ERROR_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createErrorVariableDef(pattern.pos,
(BLangErrorVariable) structuredPattern.bindingPatternVariable);
} else {
varDefStmt = ASTBuilderUtil
.createVariableDef(pattern.pos, (BLangSimpleVariable) structuredPattern.bindingPatternVariable);
}
if (structuredPattern.typeGuardExpr != null) {
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(structuredPattern.pos);
blockStmt.addStatement(varDef);
blockStmt.addStatement(varDefStmt);
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt,
structuredPattern.typeGuardExpr);
stmtExpr.type = symTable.booleanType;
ifCondition = ASTBuilderUtil
.createBinaryExpr(pattern.pos, ifCondition, stmtExpr, symTable.booleanType, OperatorKind.AND,
(BOperatorSymbol) symResolver
.resolveBinaryOperator(OperatorKind.AND, symTable.booleanType,
symTable.booleanType));
} else {
structuredPattern.body.stmts.add(0, varDef);
structuredPattern.body.stmts.add(1, varDefStmt);
}
}
return ASTBuilderUtil.createIfElseStmt(pattern.pos, ifCondition, pattern.body, null);
}
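// Returns the body of a typed binding pattern clause, prepending a definition that binds the pattern
// variable to the (converted) matched value, unless the variable is the ignore pattern '_'.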
private BLangBlockStmt getMatchPatternBody(BLangMatchBindingPatternClause pattern,
BLangSimpleVariable matchExprVar) {
BLangBlockStmt body;
BLangMatchTypedBindingPatternClause patternClause = (BLangMatchTypedBindingPatternClause) pattern;
if (patternClause.variable.name.value.equals(Names.IGNORE.value)) {
return patternClause.body;
}
BLangSimpleVarRef matchExprVarRef = ASTBuilderUtil.createVariableRef(patternClause.pos,
matchExprVar.symbol);
BLangExpression patternVarExpr = addConversionExprIfRequired(matchExprVarRef, patternClause.variable.type);
BLangSimpleVariable patternVar = ASTBuilderUtil.createVariable(patternClause.pos, "",
patternClause.variable.type, patternVarExpr, patternClause.variable.symbol);
BLangSimpleVariableDef patternVarDef = ASTBuilderUtil.createVariableDef(patternVar.pos, patternVar);
patternClause.body.stmts.add(0, patternVarDef);
body = patternClause.body;
return body;
}
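// Returns the body of the last (else) pattern clause, prepending the structured binding pattern's
// variable definition when applicable.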
private BLangBlockStmt getMatchPatternElseBody(BLangMatchBindingPatternClause pattern,
BLangSimpleVariable matchExprVar) {
BLangBlockStmt body = pattern.body;
if (NodeKind.MATCH_STRUCTURED_PATTERN_CLAUSE == pattern.getKind()) {
BLangSimpleVarRef matchExprVarRef = ASTBuilderUtil.createVariableRef(pattern.pos, matchExprVar.symbol);
BLangMatchStructuredBindingPatternClause structuredPattern =
(BLangMatchStructuredBindingPatternClause) pattern;
structuredPattern.bindingPatternVariable.expr = matchExprVarRef;
BLangStatement varDefStmt;
if (NodeKind.TUPLE_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createTupleVariableDef(pattern.pos,
(BLangTupleVariable) structuredPattern.bindingPatternVariable);
} else if (NodeKind.RECORD_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createRecordVariableDef(pattern.pos,
(BLangRecordVariable) structuredPattern.bindingPatternVariable);
} else if (NodeKind.ERROR_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createErrorVariableDef(pattern.pos,
(BLangErrorVariable) structuredPattern.bindingPatternVariable);
} else {
varDefStmt = ASTBuilderUtil
.createVariableDef(pattern.pos, (BLangSimpleVariable) structuredPattern.bindingPatternVariable);
}
structuredPattern.body.stmts.add(0, varDefStmt);
body = structuredPattern.body;
}
return body;
}
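/**
 * Wraps the expression in an implicit type conversion to the lhs type when needed. The expression is
 * returned unchanged when the types already match, when an implicit cast has already been set, or for a
 * set of known-safe combinations (e.g. nil to json, tuple to array).
 */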
BLangExpression addConversionExprIfRequired(BLangExpression expr, BType lhsType) {
if (lhsType.tag == TypeTags.NONE) {
return expr;
}
BType rhsType = expr.type;
if (types.isSameType(rhsType, lhsType)) {
return expr;
}
types.setImplicitCastExpr(expr, rhsType, lhsType);
if (expr.impConversionExpr != null) {
return expr;
}
if (lhsType.tag == TypeTags.JSON && rhsType.tag == TypeTags.NIL) {
return expr;
}
if (lhsType.tag == TypeTags.NIL && rhsType.isNullable()) {
return expr;
}
if (lhsType.tag == TypeTags.ARRAY && rhsType.tag == TypeTags.TUPLE) {
return expr;
}
BLangTypeConversionExpr conversionExpr = (BLangTypeConversionExpr)
TreeBuilder.createTypeConversionNode();
conversionExpr.expr = expr;
conversionExpr.targetType = lhsType;
conversionExpr.type = lhsType;
conversionExpr.pos = expr.pos;
conversionExpr.checkTypes = false;
conversionExpr.internal = true;
return conversionExpr;
}
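// Builds the boolean condition that tests the match expression variable against the pattern's type,
// OR-ing together the checks for each member of a union type.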
private BLangExpression createPatternIfCondition(BLangMatchBindingPatternClause patternClause,
BVarSymbol varSymbol) {
BType patternType;
switch (patternClause.getKind()) {
case MATCH_STATIC_PATTERN_CLAUSE:
BLangMatchStaticBindingPatternClause staticPattern =
(BLangMatchStaticBindingPatternClause) patternClause;
patternType = staticPattern.literal.type;
break;
case MATCH_STRUCTURED_PATTERN_CLAUSE:
BLangMatchStructuredBindingPatternClause structuredPattern =
(BLangMatchStructuredBindingPatternClause) patternClause;
patternType = getStructuredBindingPatternType(structuredPattern.bindingPatternVariable);
break;
default:
BLangMatchTypedBindingPatternClause simplePattern = (BLangMatchTypedBindingPatternClause) patternClause;
patternType = simplePattern.variable.type;
break;
}
BLangExpression binaryExpr;
BType[] memberTypes;
if (patternType.tag == TypeTags.UNION) {
BUnionType unionType = (BUnionType) patternType;
memberTypes = unionType.getMemberTypes().toArray(new BType[0]);
} else {
memberTypes = new BType[1];
memberTypes[0] = patternType;
}
if (memberTypes.length == 1) {
binaryExpr = createPatternMatchBinaryExpr(patternClause, varSymbol, memberTypes[0]);
} else {
BLangExpression lhsExpr = createPatternMatchBinaryExpr(patternClause, varSymbol, memberTypes[0]);
BLangExpression rhsExpr = createPatternMatchBinaryExpr(patternClause, varSymbol, memberTypes[1]);
binaryExpr = ASTBuilderUtil.createBinaryExpr(patternClause.pos, lhsExpr, rhsExpr,
symTable.booleanType, OperatorKind.OR,
(BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.OR,
lhsExpr.type, rhsExpr.type));
for (int i = 2; i < memberTypes.length; i++) {
lhsExpr = createPatternMatchBinaryExpr(patternClause, varSymbol, memberTypes[i]);
rhsExpr = binaryExpr;
binaryExpr = ASTBuilderUtil.createBinaryExpr(patternClause.pos, lhsExpr, rhsExpr,
symTable.booleanType, OperatorKind.OR,
(BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.OR,
lhsExpr.type, rhsExpr.type));
}
}
return binaryExpr;
}
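    // Computes the static type implied by a structured binding pattern. Tuple patterns map to a
    // BTupleType, record patterns to a fresh anonymous BRecordType (with an init function and a
    // type definition registered in the package), and error patterns to a fresh BErrorType whose
    // detail type is derived from the detail entries.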
private BType getStructuredBindingPatternType(BLangVariable bindingPatternVariable) {
if (NodeKind.TUPLE_VARIABLE == bindingPatternVariable.getKind()) {
BLangTupleVariable tupleVariable = (BLangTupleVariable) bindingPatternVariable;
List<BType> memberTypes = new ArrayList<>();
for (int i = 0; i < tupleVariable.memberVariables.size(); i++) {
memberTypes.add(getStructuredBindingPatternType(tupleVariable.memberVariables.get(i)));
}
BTupleType tupleType = new BTupleType(memberTypes);
if (tupleVariable.restVariable != null) {
BArrayType restArrayType = (BArrayType) getStructuredBindingPatternType(tupleVariable.restVariable);
tupleType.restType = restArrayType.eType;
}
return tupleType;
}
if (NodeKind.RECORD_VARIABLE == bindingPatternVariable.getKind()) {
BLangRecordVariable recordVariable = (BLangRecordVariable) bindingPatternVariable;
BRecordTypeSymbol recordSymbol =
Symbols.createRecordSymbol(0, names.fromString("$anonRecordType$" + UNDERSCORE + recordCount++),
env.enclPkg.symbol.pkgID, null, env.scope.owner, recordVariable.pos,
VIRTUAL);
recordSymbol.initializerFunc = createRecordInitFunc();
recordSymbol.scope = new Scope(recordSymbol);
recordSymbol.scope.define(
names.fromString(recordSymbol.name.value + "." + recordSymbol.initializerFunc.funcName.value),
recordSymbol.initializerFunc.symbol);
LinkedHashMap<String, BField> fields = new LinkedHashMap<>();
List<BLangSimpleVariable> typeDefFields = new ArrayList<>();
for (int i = 0; i < recordVariable.variableList.size(); i++) {
String fieldNameStr = recordVariable.variableList.get(i).key.value;
Name fieldName = names.fromString(fieldNameStr);
BType fieldType = getStructuredBindingPatternType(
recordVariable.variableList.get(i).valueBindingPattern);
BVarSymbol fieldSymbol = new BVarSymbol(Flags.REQUIRED, fieldName, env.enclPkg.symbol.pkgID, fieldType,
recordSymbol, bindingPatternVariable.pos, VIRTUAL);
fields.put(fieldName.value, new BField(fieldName, bindingPatternVariable.pos, fieldSymbol));
typeDefFields.add(ASTBuilderUtil.createVariable(null, fieldNameStr, fieldType, null, fieldSymbol));
recordSymbol.scope.define(fieldName, fieldSymbol);
}
BRecordType recordVarType = new BRecordType(recordSymbol);
recordVarType.fields = fields;
recordVarType.restFieldType = recordVariable.restParam != null ?
((BMapType) ((BLangSimpleVariable) recordVariable.restParam).type).constraint :
symTable.anydataType;
recordSymbol.type = recordVarType;
recordVarType.tsymbol = recordSymbol;
BLangRecordTypeNode recordTypeNode = TypeDefBuilderHelper.createRecordTypeNode(typeDefFields,
recordVarType,
bindingPatternVariable.pos);
recordTypeNode.initFunction =
rewrite(TypeDefBuilderHelper.createInitFunctionForRecordType(recordTypeNode, env, names, symTable),
env);
TypeDefBuilderHelper.addTypeDefinition(recordVarType, recordSymbol, recordTypeNode, env);
return recordVarType;
}
if (NodeKind.ERROR_VARIABLE == bindingPatternVariable.getKind()) {
BLangErrorVariable errorVariable = (BLangErrorVariable) bindingPatternVariable;
BErrorTypeSymbol errorTypeSymbol = new BErrorTypeSymbol(
SymTag.ERROR,
Flags.PUBLIC,
names.fromString("$anonErrorType$" + UNDERSCORE + errorCount++),
env.enclPkg.symbol.pkgID,
null, null, errorVariable.pos, VIRTUAL);
BType detailType;
if ((errorVariable.detail == null || errorVariable.detail.isEmpty()) && errorVariable.restDetail != null) {
detailType = symTable.detailType;
} else {
detailType = createDetailType(errorVariable.detail, errorVariable.restDetail, errorCount++,
errorVariable.pos);
BLangRecordTypeNode recordTypeNode = createRecordTypeNode(errorVariable, (BRecordType) detailType);
recordTypeNode.initFunction = TypeDefBuilderHelper
.createInitFunctionForRecordType(recordTypeNode, env, names, symTable);
TypeDefBuilderHelper.addTypeDefinition(detailType, detailType.tsymbol, recordTypeNode, env);
}
BErrorType errorType = new BErrorType(errorTypeSymbol, detailType);
errorTypeSymbol.type = errorType;
TypeDefBuilderHelper.addTypeDefinition(errorType, errorTypeSymbol, createErrorTypeNode(errorType), env);
return errorType;
}
return bindingPatternVariable.type;
}
private BLangRecordTypeNode createRecordTypeNode(BLangErrorVariable errorVariable, BRecordType detailType) {
List<BLangSimpleVariable> fieldList = new ArrayList<>();
for (BLangErrorVariable.BLangErrorDetailEntry field : errorVariable.detail) {
BVarSymbol symbol = field.valueBindingPattern.symbol;
if (symbol == null) {
symbol = new BVarSymbol(Flags.PUBLIC, names.fromString(field.key.value + "$"),
this.env.enclPkg.packageID, symTable.pureType, null,
field.valueBindingPattern.pos, VIRTUAL);
}
BLangSimpleVariable fieldVar = ASTBuilderUtil.createVariable(
field.valueBindingPattern.pos,
symbol.name.value,
field.valueBindingPattern.type,
field.valueBindingPattern.expr,
symbol);
fieldList.add(fieldVar);
}
return TypeDefBuilderHelper.createRecordTypeNode(fieldList, detailType, errorVariable.pos);
}
private BType createDetailType(List<BLangErrorVariable.BLangErrorDetailEntry> detail,
BLangSimpleVariable restDetail, int errorNo, Location pos) {
BRecordTypeSymbol detailRecordTypeSymbol = new BRecordTypeSymbol(
SymTag.RECORD,
Flags.PUBLIC,
names.fromString("$anonErrorType$" + UNDERSCORE + errorNo + "$detailType"),
env.enclPkg.symbol.pkgID, null, null, pos, VIRTUAL);
detailRecordTypeSymbol.initializerFunc = createRecordInitFunc();
detailRecordTypeSymbol.scope = new Scope(detailRecordTypeSymbol);
detailRecordTypeSymbol.scope.define(
names.fromString(detailRecordTypeSymbol.name.value + "." +
detailRecordTypeSymbol.initializerFunc.funcName.value),
detailRecordTypeSymbol.initializerFunc.symbol);
BRecordType detailRecordType = new BRecordType(detailRecordTypeSymbol);
detailRecordType.restFieldType = symTable.anydataType;
if (restDetail == null) {
detailRecordType.sealed = true;
}
for (BLangErrorVariable.BLangErrorDetailEntry detailEntry : detail) {
Name fieldName = names.fromIdNode(detailEntry.key);
BType fieldType = getStructuredBindingPatternType(detailEntry.valueBindingPattern);
BVarSymbol fieldSym = new BVarSymbol(Flags.PUBLIC, fieldName, detailRecordTypeSymbol.pkgID, fieldType,
detailRecordTypeSymbol, detailEntry.key.pos, VIRTUAL);
detailRecordType.fields.put(fieldName.value, new BField(fieldName, detailEntry.key.pos, fieldSym));
detailRecordTypeSymbol.scope.define(fieldName, fieldSym);
}
return detailRecordType;
}
private BAttachedFunction createRecordInitFunc() {
BInvokableType bInvokableType = new BInvokableType(new ArrayList<>(), symTable.nilType, null);
BInvokableSymbol initFuncSymbol = Symbols.createFunctionSymbol(
Flags.PUBLIC, Names.EMPTY, env.enclPkg.symbol.pkgID, bInvokableType, env.scope.owner, false,
symTable.builtinPos, VIRTUAL);
initFuncSymbol.retType = symTable.nilType;
return new BAttachedFunction(Names.INIT_FUNCTION_SUFFIX, initFuncSymbol, bInvokableType, symTable.builtinPos);
}
BLangErrorType createErrorTypeNode(BErrorType errorType) {
BLangErrorType errorTypeNode = (BLangErrorType) TreeBuilder.createErrorTypeNode();
errorTypeNode.type = errorType;
return errorTypeNode;
}
private BLangExpression createPatternMatchBinaryExpr(BLangMatchBindingPatternClause patternClause,
BVarSymbol varSymbol, BType patternType) {
Location pos = patternClause.pos;
BLangSimpleVarRef varRef = ASTBuilderUtil.createVariableRef(pos, varSymbol);
if (NodeKind.MATCH_STATIC_PATTERN_CLAUSE == patternClause.getKind()) {
BLangMatchStaticBindingPatternClause pattern = (BLangMatchStaticBindingPatternClause) patternClause;
return createBinaryExpression(pos, varRef, pattern.literal);
}
if (NodeKind.MATCH_STRUCTURED_PATTERN_CLAUSE == patternClause.getKind()) {
return createIsLikeExpression(pos, ASTBuilderUtil.createVariableRef(pos, varSymbol), patternType);
}
if (patternType == symTable.nilType) {
BLangLiteral bLangLiteral = ASTBuilderUtil.createLiteral(pos, symTable.nilType, null);
return ASTBuilderUtil.createBinaryExpr(pos, varRef, bLangLiteral, symTable.booleanType,
OperatorKind.EQUAL, (BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.EQUAL,
symTable.anyType, symTable.nilType));
} else {
return createIsAssignableExpression(pos, varSymbol, patternType);
}
}
private BLangExpression createBinaryExpression(Location pos, BLangSimpleVarRef varRef,
BLangExpression expression) {
BLangBinaryExpr binaryExpr;
if (NodeKind.GROUP_EXPR == expression.getKind()) {
return createBinaryExpression(pos, varRef, ((BLangGroupExpr) expression).expression);
}
if (NodeKind.BINARY_EXPR == expression.getKind()) {
binaryExpr = (BLangBinaryExpr) expression;
BLangExpression lhsExpr = createBinaryExpression(pos, varRef, binaryExpr.lhsExpr);
BLangExpression rhsExpr = createBinaryExpression(pos, varRef, binaryExpr.rhsExpr);
binaryExpr = ASTBuilderUtil.createBinaryExpr(pos, lhsExpr, rhsExpr, symTable.booleanType, OperatorKind.OR,
(BOperatorSymbol) symResolver
.resolveBinaryOperator(OperatorKind.OR, symTable.booleanType, symTable.booleanType));
} else if (expression.getKind() == NodeKind.SIMPLE_VARIABLE_REF
&& ((BLangSimpleVarRef) expression).variableName.value.equals(IGNORE.value)) {
BLangValueType anyType = (BLangValueType) TreeBuilder.createValueTypeNode();
anyType.type = symTable.anyType;
anyType.typeKind = TypeKind.ANY;
return ASTBuilderUtil.createTypeTestExpr(pos, varRef, anyType);
} else {
binaryExpr = ASTBuilderUtil
.createBinaryExpr(pos, varRef, expression, symTable.booleanType, OperatorKind.EQUAL, null);
BSymbol opSymbol = symResolver.resolveBinaryOperator(OperatorKind.EQUAL, varRef.type, expression.type);
if (opSymbol == symTable.notFoundSymbol) {
opSymbol = symResolver
.getBinaryEqualityForTypeSets(OperatorKind.EQUAL, symTable.anydataType, expression.type,
binaryExpr);
}
binaryExpr.opSymbol = (BOperatorSymbol) opSymbol;
}
return binaryExpr;
}
private BLangIsAssignableExpr createIsAssignableExpression(Location pos,
BVarSymbol varSymbol,
BType patternType) {
BLangSimpleVarRef varRef = ASTBuilderUtil.createVariableRef(pos, varSymbol);
return ASTBuilderUtil.createIsAssignableExpr(pos, varRef, patternType, symTable.booleanType, names,
symTable.builtinPos);
}
private BLangIsLikeExpr createIsLikeExpression(Location pos, BLangExpression expr, BType type) {
return ASTBuilderUtil.createIsLikeExpr(pos, expr, ASTBuilderUtil.createTypeNode(type), symTable.booleanType);
}
private BLangAssignment createAssignmentStmt(BLangSimpleVariable variable) {
BLangSimpleVarRef varRef = (BLangSimpleVarRef) TreeBuilder.createSimpleVariableReferenceNode();
varRef.pos = variable.pos;
varRef.variableName = variable.name;
varRef.symbol = variable.symbol;
varRef.type = variable.type;
BLangAssignment assignmentStmt = (BLangAssignment) TreeBuilder.createAssignmentNode();
assignmentStmt.expr = variable.expr;
assignmentStmt.pos = variable.pos;
assignmentStmt.setVariable(varRef);
return assignmentStmt;
}
private BLangAssignment createStructFieldUpdate(BLangFunction function, BLangSimpleVariable variable,
BVarSymbol selfSymbol) {
return createStructFieldUpdate(function, variable.expr, variable.symbol, variable.type, selfSymbol,
variable.name);
}
private BLangAssignment createStructFieldUpdate(BLangFunction function, BLangExpression expr,
BVarSymbol fieldSymbol, BType fieldType, BVarSymbol selfSymbol,
BLangIdentifier fieldName) {
BLangSimpleVarRef selfVarRef = ASTBuilderUtil.createVariableRef(function.pos, selfSymbol);
BLangFieldBasedAccess fieldAccess = ASTBuilderUtil.createFieldAccessExpr(selfVarRef, fieldName);
fieldAccess.symbol = fieldSymbol;
fieldAccess.type = fieldType;
fieldAccess.isStoreOnCreation = true;
BLangAssignment assignmentStmt = (BLangAssignment) TreeBuilder.createAssignmentNode();
assignmentStmt.expr = expr;
assignmentStmt.pos = function.pos;
assignmentStmt.setVariable(fieldAccess);
SymbolEnv initFuncEnv = SymbolEnv.createFunctionEnv(function, function.symbol.scope, env);
return rewrite(assignmentStmt, initFuncEnv);
}
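    // Adds a synthetic default pattern to a match expression covering every member type of the
    // matched expression that is not assignable to any user-written pattern, so the lowered
    // match is exhaustive.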
private void addMatchExprDefaultCase(BLangMatchExpression bLangMatchExpression) {
List<BType> exprTypes;
List<BType> unmatchedTypes = new ArrayList<>();
if (bLangMatchExpression.expr.type.tag == TypeTags.UNION) {
BUnionType unionType = (BUnionType) bLangMatchExpression.expr.type;
exprTypes = new ArrayList<>(unionType.getMemberTypes());
} else {
exprTypes = Lists.of(bLangMatchExpression.type);
}
for (BType type : exprTypes) {
boolean assignable = false;
for (BLangMatchExprPatternClause pattern : bLangMatchExpression.patternClauses) {
if (this.types.isAssignable(type, pattern.variable.type)) {
assignable = true;
break;
}
}
if (!assignable) {
unmatchedTypes.add(type);
}
}
if (unmatchedTypes.isEmpty()) {
return;
}
BType defaultPatternType;
if (unmatchedTypes.size() == 1) {
defaultPatternType = unmatchedTypes.get(0);
} else {
defaultPatternType = BUnionType.create(null, new LinkedHashSet<>(unmatchedTypes));
}
String patternCaseVarName = GEN_VAR_PREFIX.value + "t_match_default";
BLangSimpleVariable patternMatchCaseVar =
ASTBuilderUtil.createVariable(bLangMatchExpression.pos, patternCaseVarName, defaultPatternType, null,
new BVarSymbol(0, names.fromString(patternCaseVarName),
this.env.scope.owner.pkgID, defaultPatternType,
this.env.scope.owner, bLangMatchExpression.pos, VIRTUAL));
BLangMatchExprPatternClause defaultPattern =
(BLangMatchExprPatternClause) TreeBuilder.createMatchExpressionPattern();
defaultPattern.variable = patternMatchCaseVar;
defaultPattern.expr = ASTBuilderUtil.createVariableRef(bLangMatchExpression.pos, patternMatchCaseVar.symbol);
defaultPattern.pos = bLangMatchExpression.pos;
bLangMatchExpression.patternClauses.add(defaultPattern);
}
private boolean safeNavigate(BLangAccessExpression accessExpr) {
if (accessExpr.lhsVar || accessExpr.expr == null) {
return false;
}
if (accessExpr.errorSafeNavigation || accessExpr.nilSafeNavigation) {
return true;
}
NodeKind kind = accessExpr.expr.getKind();
if (kind == NodeKind.FIELD_BASED_ACCESS_EXPR ||
kind == NodeKind.INDEX_BASED_ACCESS_EXPR) {
return safeNavigate((BLangAccessExpression) accessExpr.expr);
}
return false;
}
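    /*
     * Rewrites a safe-navigation access chain into a block statement expression, roughly:
     *
     *   T $temp_result$;
     *   match (a) {
     *       error e => $temp_result$ = e;
     *       ()      => $temp_result$ = ();
     *       T t     => ... nested match for the next access in the chain ...
     *   }
     *   => $temp_result$
     *
     * This is only a sketch; the exact shape depends on the nil/error safe-navigation flags of
     * each access in the chain, handled by handleSafeNavigation() below.
     */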
private BLangExpression rewriteSafeNavigationExpr(BLangAccessExpression accessExpr) {
BType originalExprType = accessExpr.type;
String matchTempResultVarName = GEN_VAR_PREFIX.value + "temp_result";
BLangSimpleVariable tempResultVar =
ASTBuilderUtil.createVariable(accessExpr.pos, matchTempResultVarName, accessExpr.type, null,
new BVarSymbol(0, names.fromString(matchTempResultVarName),
this.env.scope.owner.pkgID, accessExpr.type,
this.env.scope.owner, accessExpr.pos, VIRTUAL));
BLangSimpleVariableDef tempResultVarDef = ASTBuilderUtil.createVariableDef(accessExpr.pos, tempResultVar);
BLangVariableReference tempResultVarRef =
ASTBuilderUtil.createVariableRef(accessExpr.pos, tempResultVar.symbol);
handleSafeNavigation(accessExpr, accessExpr.type, tempResultVar);
        BLangMatch matchExpr = this.matchStmtStack.firstElement();
        BLangBlockStmt blockStmt =
                ASTBuilderUtil.createBlockStmt(accessExpr.pos, Lists.of(tempResultVarDef, matchExpr));
BLangStatementExpression stmtExpression = createStatementExpression(blockStmt, tempResultVarRef);
stmtExpression.type = originalExprType;
this.matchStmtStack = new Stack<>();
this.accessExprStack = new Stack<>();
this.successPattern = null;
this.safeNavigationAssignment = null;
return stmtExpression;
}
private void handleSafeNavigation(BLangAccessExpression accessExpr, BType type, BLangSimpleVariable tempResultVar) {
if (accessExpr.expr == null) {
return;
}
NodeKind kind = accessExpr.expr.getKind();
if (kind == NodeKind.FIELD_BASED_ACCESS_EXPR || kind == NodeKind.INDEX_BASED_ACCESS_EXPR) {
handleSafeNavigation((BLangAccessExpression) accessExpr.expr, type, tempResultVar);
}
if (!(accessExpr.errorSafeNavigation || accessExpr.nilSafeNavigation)) {
BType originalType = accessExpr.originalType;
if (TypeTags.isXMLTypeTag(originalType.tag)) {
accessExpr.type = BUnionType.create(null, originalType, symTable.errorType);
} else {
accessExpr.type = originalType;
}
if (this.safeNavigationAssignment != null) {
this.safeNavigationAssignment.expr = addConversionExprIfRequired(accessExpr, tempResultVar.type);
}
return;
}
/*
* If the field access is a safe navigation, create a match expression.
* Then chain the current expression as the success-pattern of the parent
* match expr, if available.
* eg:
* x but { <--- parent match expr
* error e => e,
* T t => t.y but { <--- current expr
* error e => e,
* R r => r.z
* }
* }
*/
BLangMatch matchStmt = ASTBuilderUtil.createMatchStatement(accessExpr.pos, accessExpr.expr, new ArrayList<>());
boolean isAllTypesRecords = false;
LinkedHashSet<BType> memTypes = new LinkedHashSet<>();
if (accessExpr.expr.type.tag == TypeTags.UNION) {
memTypes = new LinkedHashSet<>(((BUnionType) accessExpr.expr.type).getMemberTypes());
isAllTypesRecords = isAllTypesAreRecordsInUnion(memTypes);
}
if (accessExpr.nilSafeNavigation) {
matchStmt.patternClauses.add(getMatchNullPattern(accessExpr, tempResultVar));
matchStmt.type = type;
memTypes.remove(symTable.nilType);
}
if (accessExpr.errorSafeNavigation) {
matchStmt.patternClauses.add(getMatchErrorPattern(accessExpr, tempResultVar));
matchStmt.type = type;
matchStmt.pos = accessExpr.pos;
memTypes.remove(symTable.errorType);
}
BLangMatchTypedBindingPatternClause successPattern = null;
Name field = getFieldName(accessExpr);
if (field == Names.EMPTY) {
successPattern = getSuccessPattern(accessExpr.expr.type, accessExpr, tempResultVar,
accessExpr.errorSafeNavigation);
matchStmt.patternClauses.add(successPattern);
pushToMatchStatementStack(matchStmt, accessExpr, successPattern);
return;
}
if (isAllTypesRecords) {
for (BType memberType : memTypes) {
if (((BRecordType) memberType).fields.containsKey(field.value)) {
successPattern = getSuccessPattern(memberType, accessExpr, tempResultVar,
accessExpr.errorSafeNavigation);
matchStmt.patternClauses.add(successPattern);
}
}
matchStmt.patternClauses.add(getMatchAllAndNilReturnPattern(accessExpr, tempResultVar));
pushToMatchStatementStack(matchStmt, accessExpr, successPattern);
return;
}
successPattern =
getSuccessPattern(accessExpr.expr.type, accessExpr, tempResultVar, accessExpr.errorSafeNavigation);
matchStmt.patternClauses.add(successPattern);
pushToMatchStatementStack(matchStmt, accessExpr, successPattern);
}
private void pushToMatchStatementStack(BLangMatch matchStmt, BLangAccessExpression accessExpr,
BLangMatchTypedBindingPatternClause successPattern) {
this.matchStmtStack.push(matchStmt);
if (this.successPattern != null) {
this.successPattern.body = ASTBuilderUtil.createBlockStmt(accessExpr.pos, Lists.of(matchStmt));
}
this.successPattern = successPattern;
}
private Name getFieldName(BLangAccessExpression accessExpr) {
Name field = Names.EMPTY;
if (accessExpr.getKind() == NodeKind.FIELD_BASED_ACCESS_EXPR) {
field = new Name(((BLangFieldBasedAccess) accessExpr).field.value);
} else if (accessExpr.getKind() == NodeKind.INDEX_BASED_ACCESS_EXPR) {
BLangExpression indexBasedExpression = ((BLangIndexBasedAccess) accessExpr).indexExpr;
if (indexBasedExpression.getKind() == NodeKind.LITERAL) {
field = new Name(((BLangLiteral) indexBasedExpression).value.toString());
}
}
return field;
}
private boolean isAllTypesAreRecordsInUnion(LinkedHashSet<BType> memTypes) {
for (BType memType : memTypes) {
int typeTag = memType.tag;
if (typeTag != TypeTags.RECORD && typeTag != TypeTags.ERROR && typeTag != TypeTags.NIL) {
return false;
}
}
return true;
}
private BLangMatchTypedBindingPatternClause getMatchErrorPattern(BLangExpression expr,
BLangSimpleVariable tempResultVar) {
String errorPatternVarName = GEN_VAR_PREFIX.value + "t_match_error";
BLangSimpleVariable errorPatternVar =
ASTBuilderUtil.createVariable(expr.pos, errorPatternVarName, symTable.errorType, null,
new BVarSymbol(0, names.fromString(errorPatternVarName),
this.env.scope.owner.pkgID, symTable.errorType,
this.env.scope.owner, expr.pos, VIRTUAL));
BLangSimpleVarRef assignmentRhsExpr = ASTBuilderUtil.createVariableRef(expr.pos, errorPatternVar.symbol);
BLangVariableReference tempResultVarRef = ASTBuilderUtil.createVariableRef(expr.pos, tempResultVar.symbol);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(expr.pos, tempResultVarRef, assignmentRhsExpr, false);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(expr.pos, Lists.of(assignmentStmt));
BLangMatchTypedBindingPatternClause errorPattern = ASTBuilderUtil
.createMatchStatementPattern(expr.pos, errorPatternVar, patternBody);
return errorPattern;
}
private BLangMatchExprPatternClause getMatchNullPatternGivenExpression(Location pos,
BLangExpression expr) {
String nullPatternVarName = IGNORE.toString();
        BLangSimpleVariable nullPatternVar =
                ASTBuilderUtil.createVariable(pos, nullPatternVarName, symTable.nilType, null,
                        new BVarSymbol(0, names.fromString(nullPatternVarName),
                                this.env.scope.owner.pkgID, symTable.nilType,
                                this.env.scope.owner, pos, VIRTUAL));
        BLangMatchExprPatternClause nullPattern =
                (BLangMatchExprPatternClause) TreeBuilder.createMatchExpressionPattern();
        nullPattern.variable = nullPatternVar;
nullPattern.expr = expr;
nullPattern.pos = pos;
return nullPattern;
}
private BLangMatchTypedBindingPatternClause getMatchNullPattern(BLangExpression expr,
BLangSimpleVariable tempResultVar) {
String nullPatternVarName = GEN_VAR_PREFIX.value + "t_match_null";
BLangSimpleVariable nullPatternVar =
ASTBuilderUtil.createVariable(expr.pos, nullPatternVarName, symTable.nilType, null,
new BVarSymbol(0, names.fromString(nullPatternVarName),
this.env.scope.owner.pkgID, symTable.nilType,
this.env.scope.owner, expr.pos, VIRTUAL));
BLangSimpleVarRef assignmentRhsExpr = ASTBuilderUtil.createVariableRef(expr.pos, nullPatternVar.symbol);
BLangVariableReference tempResultVarRef = ASTBuilderUtil.createVariableRef(expr.pos, tempResultVar.symbol);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(expr.pos, tempResultVarRef, assignmentRhsExpr, false);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(expr.pos, Lists.of(assignmentStmt));
BLangMatchTypedBindingPatternClause nullPattern = ASTBuilderUtil
.createMatchStatementPattern(expr.pos, nullPatternVar, patternBody);
return nullPattern;
}
private BLangMatchStaticBindingPatternClause getMatchAllAndNilReturnPattern(BLangExpression expr,
BLangSimpleVariable tempResultVar) {
BLangVariableReference tempResultVarRef = ASTBuilderUtil.createVariableRef(expr.pos, tempResultVar.symbol);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(expr.pos, tempResultVarRef, createLiteral(expr.pos,
symTable.nilType, Names.NIL_VALUE), false);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(expr.pos, Lists.of(assignmentStmt));
BLangMatchStaticBindingPatternClause matchAllPattern =
(BLangMatchStaticBindingPatternClause) TreeBuilder.createMatchStatementStaticBindingPattern();
String matchAllVarName = "_";
matchAllPattern.literal =
ASTBuilderUtil.createVariableRef(expr.pos, new BVarSymbol(0, names.fromString(matchAllVarName),
this.env.scope.owner.pkgID, symTable.anyType,
this.env.scope.owner, expr.pos, VIRTUAL));
matchAllPattern.body = patternBody;
return matchAllPattern;
}
private BLangMatchTypedBindingPatternClause getSuccessPattern(BType type, BLangAccessExpression accessExpr,
BLangSimpleVariable tempResultVar,
boolean liftError) {
type = types.getSafeType(type, true, liftError);
String successPatternVarName = GEN_VAR_PREFIX.value + "t_match_success";
BVarSymbol successPatternSymbol;
if (type.tag == TypeTags.INVOKABLE) {
successPatternSymbol = new BInvokableSymbol(SymTag.VARIABLE, 0, names.fromString(successPatternVarName),
this.env.scope.owner.pkgID, type, this.env.scope.owner,
accessExpr.pos, VIRTUAL);
} else {
successPatternSymbol = new BVarSymbol(0, names.fromString(successPatternVarName),
this.env.scope.owner.pkgID, type, this.env.scope.owner,
accessExpr.pos, VIRTUAL);
}
BLangSimpleVariable successPatternVar = ASTBuilderUtil.createVariable(accessExpr.pos, successPatternVarName,
type, null, successPatternSymbol);
BLangAccessExpression tempAccessExpr = nodeCloner.clone(accessExpr);
if (accessExpr.getKind() == NodeKind.INDEX_BASED_ACCESS_EXPR) {
((BLangIndexBasedAccess) tempAccessExpr).indexExpr = ((BLangIndexBasedAccess) accessExpr).indexExpr;
}
if (accessExpr instanceof BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess) {
((BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess) tempAccessExpr).nsSymbol =
((BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess) accessExpr).nsSymbol;
}
tempAccessExpr.expr = ASTBuilderUtil.createVariableRef(accessExpr.pos, successPatternVar.symbol);
tempAccessExpr.errorSafeNavigation = false;
tempAccessExpr.nilSafeNavigation = false;
accessExpr.cloneRef = null;
if (TypeTags.isXMLTypeTag(tempAccessExpr.expr.type.tag)) {
tempAccessExpr.type = BUnionType.create(null, accessExpr.originalType, symTable.errorType,
symTable.nilType);
} else {
tempAccessExpr.type = accessExpr.originalType;
}
tempAccessExpr.optionalFieldAccess = accessExpr.optionalFieldAccess;
BLangVariableReference tempResultVarRef =
ASTBuilderUtil.createVariableRef(accessExpr.pos, tempResultVar.symbol);
BLangExpression assignmentRhsExpr = addConversionExprIfRequired(tempAccessExpr, tempResultVarRef.type);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(accessExpr.pos, tempResultVarRef, assignmentRhsExpr, false);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(accessExpr.pos, Lists.of(assignmentStmt));
BLangMatchTypedBindingPatternClause successPattern =
ASTBuilderUtil.createMatchStatementPattern(accessExpr.pos, successPatternVar, patternBody);
this.safeNavigationAssignment = assignmentStmt;
return successPattern;
}
private boolean safeNavigateLHS(BLangExpression expr) {
if (expr.getKind() != NodeKind.FIELD_BASED_ACCESS_EXPR && expr.getKind() != NodeKind.INDEX_BASED_ACCESS_EXPR) {
return false;
}
BLangExpression varRef = ((BLangAccessExpression) expr).expr;
if (varRef.type.isNullable()) {
return true;
}
return safeNavigateLHS(varRef);
}
private BLangStatement rewriteSafeNavigationAssignment(BLangAccessExpression accessExpr, BLangExpression rhsExpr,
boolean safeAssignment) {
this.accessExprStack = new Stack<>();
List<BLangStatement> stmts = new ArrayList<>();
createLHSSafeNavigation(stmts, accessExpr.expr);
BLangAssignment assignment = ASTBuilderUtil.createAssignmentStmt(accessExpr.pos,
cloneExpression(accessExpr), rhsExpr);
stmts.add(assignment);
return ASTBuilderUtil.createBlockStmt(accessExpr.pos, stmts);
}
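    // For each nullable link in an LHS access chain, emits an if-statement that either fills in
    // an empty mapping value (for defaultable mapping types: map, record, json) or panics with a
    // nil-reference error; invocation results in the chain are first captured in an intermediate
    // variable so they are evaluated only once.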
private void createLHSSafeNavigation(List<BLangStatement> stmts, BLangExpression expr) {
NodeKind kind = expr.getKind();
boolean root = false;
if (kind == NodeKind.FIELD_BASED_ACCESS_EXPR || kind == NodeKind.INDEX_BASED_ACCESS_EXPR ||
kind == NodeKind.INVOCATION) {
BLangAccessExpression accessExpr = (BLangAccessExpression) expr;
createLHSSafeNavigation(stmts, accessExpr.expr);
accessExpr.expr = accessExprStack.pop();
} else {
root = true;
}
if (expr.getKind() == NodeKind.INVOCATION) {
BLangInvocation invocation = (BLangInvocation) expr;
            BVarSymbol intermediateSymbol = new BVarSymbol(0,
                    names.fromString(GEN_VAR_PREFIX.value + "i_intermediate"),
                    this.env.scope.owner.pkgID, invocation.type,
                    this.env.scope.owner, expr.pos, VIRTUAL);
            BLangSimpleVariable intermediateVariable = ASTBuilderUtil.createVariable(expr.pos,
                    intermediateSymbol.name.value, invocation.type, invocation, intermediateSymbol);
            BLangSimpleVariableDef intermediateVariableDefinition = ASTBuilderUtil.createVariableDef(invocation.pos,
                    intermediateVariable);
            stmts.add(intermediateVariableDefinition);
            expr = ASTBuilderUtil.createVariableRef(invocation.pos, intermediateSymbol);
}
if (expr.type.isNullable()) {
BLangTypeTestExpr isNillTest = ASTBuilderUtil.createTypeTestExpr(expr.pos, expr, getNillTypeNode());
isNillTest.type = symTable.booleanType;
BLangBlockStmt thenStmt = ASTBuilderUtil.createBlockStmt(expr.pos);
expr = cloneExpression(expr);
expr.type = types.getSafeType(expr.type, true, false);
if (isDefaultableMappingType(expr.type) && !root) {
BLangRecordLiteral jsonLiteral = (BLangRecordLiteral) TreeBuilder.createRecordLiteralNode();
jsonLiteral.type = expr.type;
jsonLiteral.pos = expr.pos;
BLangAssignment assignment = ASTBuilderUtil.createAssignmentStmt(expr.pos,
expr, jsonLiteral);
thenStmt.addStatement(assignment);
} else {
BLangLiteral literal = (BLangLiteral) TreeBuilder.createLiteralExpression();
literal.value = ERROR_REASON_NULL_REFERENCE_ERROR;
literal.type = symTable.stringType;
BLangErrorConstructorExpr errorConstructorExpr =
(BLangErrorConstructorExpr) TreeBuilder.createErrorConstructorExpressionNode();
BSymbol symbol = symResolver.lookupMainSpaceSymbolInPackage(errorConstructorExpr.pos, env,
names.fromString(""), names.fromString("error"));
errorConstructorExpr.type = symbol.type;
errorConstructorExpr.pos = expr.pos;
List<BLangExpression> positionalArgs = new ArrayList<>();
positionalArgs.add(literal);
errorConstructorExpr.positionalArgs = positionalArgs;
BLangPanic panicNode = (BLangPanic) TreeBuilder.createPanicNode();
panicNode.expr = errorConstructorExpr;
panicNode.pos = expr.pos;
thenStmt.addStatement(panicNode);
}
BLangIf ifelse = ASTBuilderUtil.createIfElseStmt(expr.pos, isNillTest, thenStmt, null);
stmts.add(ifelse);
}
accessExprStack.push(expr);
}
BLangValueType getNillTypeNode() {
BLangValueType nillTypeNode = (BLangValueType) TreeBuilder.createValueTypeNode();
nillTypeNode.typeKind = TypeKind.NIL;
nillTypeNode.type = symTable.nilType;
return nillTypeNode;
}
private BLangAccessExpression cloneExpression(BLangExpression expr) {
switch (expr.getKind()) {
case SIMPLE_VARIABLE_REF:
return ASTBuilderUtil.createVariableRef(expr.pos, ((BLangSimpleVarRef) expr).symbol);
case FIELD_BASED_ACCESS_EXPR:
case INDEX_BASED_ACCESS_EXPR:
case INVOCATION:
return cloneAccessExpr((BLangAccessExpression) expr);
default:
throw new IllegalStateException();
}
}
private BLangAccessExpression cloneAccessExpr(BLangAccessExpression originalAccessExpr) {
if (originalAccessExpr.expr == null) {
return originalAccessExpr;
}
BLangExpression varRef;
NodeKind kind = originalAccessExpr.expr.getKind();
if (kind == NodeKind.FIELD_BASED_ACCESS_EXPR || kind == NodeKind.INDEX_BASED_ACCESS_EXPR ||
kind == NodeKind.INVOCATION) {
varRef = cloneAccessExpr((BLangAccessExpression) originalAccessExpr.expr);
} else {
varRef = cloneExpression(originalAccessExpr.expr);
}
varRef.type = types.getSafeType(originalAccessExpr.expr.type, true, false);
BLangAccessExpression accessExpr;
switch (originalAccessExpr.getKind()) {
case FIELD_BASED_ACCESS_EXPR:
accessExpr = ASTBuilderUtil.createFieldAccessExpr((BLangAccessibleExpression) varRef,
((BLangFieldBasedAccess) originalAccessExpr).field);
break;
case INDEX_BASED_ACCESS_EXPR:
accessExpr = ASTBuilderUtil.createIndexAccessExpr((BLangAccessibleExpression) varRef,
((BLangIndexBasedAccess) originalAccessExpr).indexExpr);
break;
            case INVOCATION:
                // Cloning invocations is not supported here; leaving accessExpr null would NPE
                // in the field assignments below, so fail fast instead.
                throw new IllegalStateException();
default:
throw new IllegalStateException();
}
accessExpr.originalType = originalAccessExpr.originalType;
accessExpr.pos = originalAccessExpr.pos;
accessExpr.lhsVar = originalAccessExpr.lhsVar;
accessExpr.symbol = originalAccessExpr.symbol;
accessExpr.errorSafeNavigation = false;
accessExpr.nilSafeNavigation = false;
accessExpr.type = originalAccessExpr.originalType;
return accessExpr;
}
private BLangBinaryExpr getModifiedIntRangeStartExpr(BLangExpression expr) {
BLangLiteral constOneLiteral = ASTBuilderUtil.createLiteral(expr.pos, symTable.intType, 1L);
return ASTBuilderUtil.createBinaryExpr(expr.pos, expr, constOneLiteral, symTable.intType, OperatorKind.ADD,
(BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.ADD,
symTable.intType,
symTable.intType));
}
private BLangBinaryExpr getModifiedIntRangeEndExpr(BLangExpression expr) {
BLangLiteral constOneLiteral = ASTBuilderUtil.createLiteral(expr.pos, symTable.intType, 1L);
return ASTBuilderUtil.createBinaryExpr(expr.pos, expr, constOneLiteral, symTable.intType, OperatorKind.SUB,
(BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.SUB,
symTable.intType,
symTable.intType));
}
private BLangLiteral getBooleanLiteral(boolean value) {
BLangLiteral literal = (BLangLiteral) TreeBuilder.createLiteralExpression();
literal.value = value;
literal.type = symTable.booleanType;
literal.pos = symTable.builtinPos;
return literal;
}
private boolean isDefaultableMappingType(BType type) {
switch (types.getSafeType(type, true, false).tag) {
case TypeTags.JSON:
case TypeTags.MAP:
case TypeTags.RECORD:
return true;
default:
return false;
}
}
private BLangFunction createInitFunctionForClassDefn(BLangClassDefinition classDefinition, SymbolEnv env) {
BLangFunction initFunction =
TypeDefBuilderHelper.createInitFunctionForStructureType(classDefinition.pos, classDefinition.symbol,
env, names, Names.GENERATED_INIT_SUFFIX, symTable, classDefinition.type);
BObjectTypeSymbol typeSymbol = ((BObjectTypeSymbol) classDefinition.type.tsymbol);
typeSymbol.generatedInitializerFunc = new BAttachedFunction(Names.GENERATED_INIT_SUFFIX, initFunction.symbol,
(BInvokableType) initFunction.type, classDefinition.pos);
classDefinition.generatedInitFunction = initFunction;
initFunction.returnTypeNode.type = symTable.nilType;
return rewrite(initFunction, env);
}
private void visitBinaryLogicalExpr(BLangBinaryExpr binaryExpr) {
/*
         * Desugar (lhsExpr && rhsExpr) and (lhsExpr || rhsExpr) into the following if-else:
*
* logical AND:
* -------------
* T $result$;
* if (lhsExpr) {
* $result$ = rhsExpr;
* } else {
* $result$ = false;
* }
*
* logical OR:
* -------------
* T $result$;
* if (lhsExpr) {
* $result$ = true;
* } else {
* $result$ = rhsExpr;
* }
*
*/
BLangSimpleVariableDef resultVarDef = createVarDef("$result$", binaryExpr.type, null, symTable.builtinPos);
BLangBlockStmt thenBody = ASTBuilderUtil.createBlockStmt(binaryExpr.pos);
BLangBlockStmt elseBody = ASTBuilderUtil.createBlockStmt(binaryExpr.pos);
BLangSimpleVarRef thenResultVarRef = ASTBuilderUtil.createVariableRef(symTable.builtinPos,
resultVarDef.var.symbol);
BLangExpression thenResult;
if (binaryExpr.opKind == OperatorKind.AND) {
thenResult = binaryExpr.rhsExpr;
} else {
thenResult = getBooleanLiteral(true);
}
BLangAssignment thenAssignment =
ASTBuilderUtil.createAssignmentStmt(binaryExpr.pos, thenResultVarRef, thenResult);
thenBody.addStatement(thenAssignment);
BLangExpression elseResult;
BLangSimpleVarRef elseResultVarRef = ASTBuilderUtil.createVariableRef(symTable.builtinPos,
resultVarDef.var.symbol);
if (binaryExpr.opKind == OperatorKind.AND) {
elseResult = getBooleanLiteral(false);
} else {
elseResult = binaryExpr.rhsExpr;
}
BLangAssignment elseAssignment =
ASTBuilderUtil.createAssignmentStmt(binaryExpr.pos, elseResultVarRef, elseResult);
elseBody.addStatement(elseAssignment);
BLangSimpleVarRef resultVarRef = ASTBuilderUtil.createVariableRef(binaryExpr.pos, resultVarDef.var.symbol);
BLangIf ifElse = ASTBuilderUtil.createIfElseStmt(binaryExpr.pos, binaryExpr.lhsExpr, thenBody, elseBody);
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(binaryExpr.pos, Lists.of(resultVarDef, ifElse));
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt, resultVarRef);
stmtExpr.type = binaryExpr.type;
result = rewriteExpr(stmtExpr);
}
    /**
     * Split the package init function into several smaller functions.
     *
     * @param packageNode package node
     * @param env         symbol environment
     * @return the original init function, trimmed in size
     */
private BLangFunction splitInitFunction(BLangPackage packageNode, SymbolEnv env) {
int methodSize = INIT_METHOD_SPLIT_SIZE;
BLangBlockFunctionBody funcBody = (BLangBlockFunctionBody) packageNode.initFunction.body;
if (!isJvmTarget) {
return packageNode.initFunction;
}
BLangFunction initFunction = packageNode.initFunction;
List<BLangFunction> generatedFunctions = new ArrayList<>();
List<BLangStatement> stmts = new ArrayList<>(funcBody.stmts);
funcBody.stmts.clear();
BLangFunction newFunc = initFunction;
BLangBlockFunctionBody newFuncBody = (BLangBlockFunctionBody) newFunc.body;
int varDefIndex = 0;
for (int i = 0; i < stmts.size(); i++) {
BLangStatement statement = stmts.get(i);
if (statement.getKind() == NodeKind.VARIABLE_DEF) {
break;
}
varDefIndex++;
if (i > 0 && (i % methodSize == 0 || isAssignmentWithInitOrRecordLiteralExpr(statement))) {
generatedFunctions.add(newFunc);
newFunc = createIntermediateInitFunction(packageNode, env);
newFuncBody = (BLangBlockFunctionBody) newFunc.body;
symTable.rootScope.define(names.fromIdNode(newFunc.name), newFunc.symbol);
}
newFuncBody.stmts.add(stmts.get(i));
}
List<BLangStatement> chunkStmts = new ArrayList<>();
for (int i = varDefIndex; i < stmts.size(); i++) {
BLangStatement stmt = stmts.get(i);
chunkStmts.add(stmt);
varDefIndex++;
            if ((stmt.getKind() == NodeKind.ASSIGNMENT) &&
                    (((BLangAssignment) stmt).expr.getKind() == NodeKind.SERVICE_CONSTRUCTOR) &&
                    (newFuncBody.stmts.size() + chunkStmts.size() > methodSize)) {
                generatedFunctions.add(newFunc);
                newFunc = createIntermediateInitFunction(packageNode, env);
                newFuncBody = (BLangBlockFunctionBody) newFunc.body;
                symTable.rootScope.define(names.fromIdNode(newFunc.name), newFunc.symbol);
                newFuncBody.stmts.addAll(chunkStmts);
                chunkStmts.clear();
} else if ((stmt.getKind() == NodeKind.ASSIGNMENT) &&
(((BLangAssignment) stmt).varRef instanceof BLangPackageVarRef) &&
Symbols.isFlagOn(((BLangPackageVarRef) ((BLangAssignment) stmt).varRef).varSymbol.flags,
Flags.LISTENER)
) {
break;
}
}
newFuncBody.stmts.addAll(chunkStmts);
for (int i = varDefIndex; i < stmts.size(); i++) {
if (i > 0 && i % methodSize == 0) {
generatedFunctions.add(newFunc);
newFunc = createIntermediateInitFunction(packageNode, env);
newFuncBody = (BLangBlockFunctionBody) newFunc.body;
symTable.rootScope.define(names.fromIdNode(newFunc.name), newFunc.symbol);
}
newFuncBody.stmts.add(stmts.get(i));
}
generatedFunctions.add(newFunc);
for (int j = 0; j < generatedFunctions.size() - 1; j++) {
BLangFunction thisFunction = generatedFunctions.get(j);
BLangCheckedExpr checkedExpr =
ASTBuilderUtil.createCheckExpr(initFunction.pos,
createInvocationNode(generatedFunctions.get(j + 1).name.value,
new ArrayList<>(), symTable.errorOrNilType),
symTable.nilType);
checkedExpr.equivalentErrorTypeList.add(symTable.errorType);
BLangExpressionStmt expressionStmt = ASTBuilderUtil
.createExpressionStmt(thisFunction.pos, (BLangBlockFunctionBody) thisFunction.body);
expressionStmt.expr = checkedExpr;
expressionStmt.expr.pos = initFunction.pos;
if (j > 0) {
thisFunction = rewrite(thisFunction, env);
packageNode.functions.add(thisFunction);
packageNode.topLevelNodes.add(thisFunction);
}
}
if (generatedFunctions.size() > 1) {
BLangFunction lastFunc = generatedFunctions.get(generatedFunctions.size() - 1);
lastFunc = rewrite(lastFunc, env);
packageNode.functions.add(lastFunc);
packageNode.topLevelNodes.add(lastFunc);
}
return generatedFunctions.get(0);
}
private boolean isAssignmentWithInitOrRecordLiteralExpr(BLangStatement statement) {
if (statement.getKind() == NodeKind.ASSIGNMENT) {
NodeKind exprKind = ((BLangAssignment) statement).getExpression().getKind();
return exprKind == NodeKind.TYPE_INIT_EXPR || exprKind == NodeKind.RECORD_LITERAL_EXPR;
}
return false;
}
    /**
     * Create an intermediate package init function.
     *
     * @param pkgNode package node
     * @param env     symbol environment of the package
     * @return the newly created intermediate init function
     */
private BLangFunction createIntermediateInitFunction(BLangPackage pkgNode, SymbolEnv env) {
String alias = pkgNode.symbol.pkgID.toString();
BLangFunction initFunction = ASTBuilderUtil
.createInitFunctionWithErrorOrNilReturn(pkgNode.pos, alias,
new Name(Names.INIT_FUNCTION_SUFFIX.value
+ this.initFuncIndex++), symTable);
createInvokableSymbol(initFunction, env);
return initFunction;
}
private BType getRestType(BInvokableSymbol invokableSymbol) {
if (invokableSymbol != null && invokableSymbol.restParam != null) {
return invokableSymbol.restParam.type;
}
return null;
}
private BType getRestType(BLangFunction function) {
if (function != null && function.restParam != null) {
return function.restParam.type;
}
return null;
}
private BVarSymbol getRestSymbol(BLangFunction function) {
if (function != null && function.restParam != null) {
return function.restParam.symbol;
}
return null;
}
private boolean isComputedKey(RecordLiteralNode.RecordField field) {
if (!field.isKeyValueField()) {
return false;
}
return ((BLangRecordLiteral.BLangRecordKeyValueField) field).key.computedKey;
}
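    // Normalizes a mapping constructor: variable-name fields become explicit key-value fields
    // with string-literal keys, spread fields are rewritten in place, and the result is wrapped
    // as either a struct literal (record types) or a map literal.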
private BLangRecordLiteral rewriteMappingConstructor(BLangRecordLiteral mappingConstructorExpr) {
List<RecordLiteralNode.RecordField> fields = mappingConstructorExpr.fields;
BType type = mappingConstructorExpr.type;
Location pos = mappingConstructorExpr.pos;
List<RecordLiteralNode.RecordField> rewrittenFields = new ArrayList<>(fields.size());
for (RecordLiteralNode.RecordField field : fields) {
if (field.isKeyValueField()) {
BLangRecordLiteral.BLangRecordKeyValueField keyValueField =
(BLangRecordLiteral.BLangRecordKeyValueField) field;
BLangRecordLiteral.BLangRecordKey key = keyValueField.key;
BLangExpression origKey = key.expr;
BLangExpression keyExpr;
if (key.computedKey) {
keyExpr = origKey;
} else {
keyExpr = origKey.getKind() == NodeKind.SIMPLE_VARIABLE_REF ? createStringLiteral(pos,
StringEscapeUtils.unescapeJava(((BLangSimpleVarRef) origKey).variableName.value)) :
((BLangLiteral) origKey);
}
BLangRecordLiteral.BLangRecordKeyValueField rewrittenField =
ASTBuilderUtil.createBLangRecordKeyValue(rewriteExpr(keyExpr),
rewriteExpr(keyValueField.valueExpr));
rewrittenField.pos = keyValueField.pos;
rewrittenField.key.pos = key.pos;
rewrittenFields.add(rewrittenField);
} else if (field.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
BLangSimpleVarRef varRefField = (BLangSimpleVarRef) field;
rewrittenFields.add(ASTBuilderUtil.createBLangRecordKeyValue(
rewriteExpr(createStringLiteral(pos,
StringEscapeUtils.unescapeJava(varRefField.variableName.value))),
rewriteExpr(varRefField)));
} else {
BLangRecordLiteral.BLangRecordSpreadOperatorField spreadOpField =
(BLangRecordLiteral.BLangRecordSpreadOperatorField) field;
spreadOpField.expr = rewriteExpr(spreadOpField.expr);
rewrittenFields.add(spreadOpField);
}
}
fields.clear();
return type.tag == TypeTags.RECORD ? new BLangStructLiteral(pos, type, rewrittenFields) :
new BLangMapLiteral(pos, type, rewrittenFields);
}
protected void addTransactionInternalModuleImport() {
PackageID packageID = new PackageID(Names.BALLERINA_INTERNAL_ORG, Lists.of(Names.TRANSACTION),
Names.TRANSACTION_INTERNAL_VERSION);
if (!env.enclPkg.packageID.equals(packageID)) {
BLangImportPackage importDcl = (BLangImportPackage) TreeBuilder.createImportPackageNode();
List<BLangIdentifier> pkgNameComps = new ArrayList<>();
pkgNameComps.add(ASTBuilderUtil.createIdentifier(env.enclPkg.pos, Names.TRANSACTION.value));
importDcl.pkgNameComps = pkgNameComps;
importDcl.pos = env.enclPkg.symbol.pos;
importDcl.orgName = ASTBuilderUtil.createIdentifier(env.enclPkg.pos, Names.BALLERINA_INTERNAL_ORG.value);
importDcl.alias = ASTBuilderUtil.createIdentifier(env.enclPkg.pos, "trx");
importDcl.version = ASTBuilderUtil.createIdentifier(env.enclPkg.pos, "");
importDcl.symbol = symTable.internalTransactionModuleSymbol;
env.enclPkg.imports.add(importDcl);
env.enclPkg.symbol.imports.add(importDcl.symbol);
}
}
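    /*
     * Desugars a foreach statement into an iterator-driven while loop, roughly:
     *
     *   foreach var x in coll { ... }
     *
     * becomes (a sketch; the actual nodes are built below from the iterator var-def passed in):
     *
     *   var $result$ = $iterator$.next();
     *   while ($result$ is <non-nil result type>) {
     *       var x = $result$.value;
     *       ...
     *       $result$ = $iterator$.next();
     *   }
     */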
private BLangBlockStmt desugarForeachToWhile(BLangForeach foreach, BLangSimpleVariableDef varDef) {
BVarSymbol iteratorSymbol = varDef.var.symbol;
BVarSymbol resultSymbol = new BVarSymbol(0, names.fromString("$result$"), this.env.scope.owner.pkgID,
foreach.nillableResultType, this.env.scope.owner, foreach.pos,
VIRTUAL);
BLangSimpleVariableDef resultVariableDefinition = getIteratorNextVariableDefinition(foreach.pos,
foreach.nillableResultType, iteratorSymbol, resultSymbol);
BLangType userDefineType = getUserDefineTypeNode(foreach.resultType);
BLangSimpleVarRef resultReferenceInWhile = ASTBuilderUtil.createVariableRef(foreach.pos, resultSymbol);
BLangTypeTestExpr typeTestExpr = ASTBuilderUtil
.createTypeTestExpr(foreach.pos, resultReferenceInWhile, userDefineType);
BLangWhile whileNode = (BLangWhile) TreeBuilder.createWhileNode();
whileNode.pos = foreach.pos;
whileNode.expr = typeTestExpr;
whileNode.body = foreach.body;
BLangAssignment resultAssignment = getIteratorNextAssignment(foreach.pos, iteratorSymbol, resultSymbol);
VariableDefinitionNode variableDefinitionNode = foreach.variableDefinitionNode;
BLangFieldBasedAccess valueAccessExpr = getValueAccessExpression(foreach.pos, foreach.varType, resultSymbol);
valueAccessExpr.expr = addConversionExprIfRequired(valueAccessExpr.expr,
types.getSafeType(valueAccessExpr.expr.type, true, false));
variableDefinitionNode.getVariable()
.setInitialExpression(addConversionExprIfRequired(valueAccessExpr, foreach.varType));
whileNode.body.stmts.add(0, (BLangStatement) variableDefinitionNode);
whileNode.body.stmts.add(1, resultAssignment);
BLangBlockStmt blockNode = ASTBuilderUtil.createBlockStmt(foreach.pos);
blockNode.addStatement(varDef);
blockNode.addStatement(resultVariableDefinition);
blockNode.addStatement(whileNode);
return blockNode;
}
private BLangType getUserDefineTypeNode(BType type) {
BLangUserDefinedType recordType =
new BLangUserDefinedType(ASTBuilderUtil.createIdentifier(null, ""),
ASTBuilderUtil.createIdentifier(null, ""));
recordType.type = type;
return recordType;
}
@Override
public void visit(BLangWhile whileNode) {
if (whileNode.onFailClause != null) {
BLangOnFailClause onFailClause = whileNode.onFailClause;
whileNode.onFailClause = null;
whileNode.body.isBreakable = false;
BLangDo doStmt = wrapStatementWithinDo(whileNode.pos, whileNode, onFailClause);
result = rewrite(doStmt, env);
} else {
whileNode.expr = rewriteExpr(whileNode.expr);
whileNode.body = rewrite(whileNode.body, env);
result = whileNode;
}
}
private BLangDo wrapStatementWithinDo(Location location, BLangStatement statement,
BLangOnFailClause onFailClause) {
BLangDo bLDo = (BLangDo) TreeBuilder.createDoNode();
BLangBlockStmt doBlock = ASTBuilderUtil.createBlockStmt(location);
doBlock.scope = new Scope(env.scope.owner);
bLDo.body = doBlock;
bLDo.pos = location;
bLDo.onFailClause = onFailClause;
bLDo.body.isBreakable = true;
doBlock.stmts.add(statement);
return bLDo;
}
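    /*
     * Desugars a lock statement by trapping the body so the lock is always released, roughly:
     *
     *   lock { body }
     *
     * becomes (a sketch):
     *
     *   <lock>
     *   error? $errorResult = trap { body; () };
     *   <unlock>
     *   if ($errorResult is error) {
     *       panic $errorResult;
     *   }
     */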
@Override
public void visit(BLangLock lockNode) {
BLangOnFailClause currentOnFailClause = this.onFailClause;
BLangSimpleVariableDef currentOnFailCallDef = this.onFailCallFuncDef;
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(lockNode.pos);
if (lockNode.onFailClause != null) {
blockStmt.isBreakable = true;
rewrite(lockNode.onFailClause, env);
}
BLangLockStmt lockStmt = new BLangLockStmt(lockNode.pos);
blockStmt.addStatement(lockStmt);
enclLocks.push(lockStmt);
BLangLiteral nilLiteral = ASTBuilderUtil.createLiteral(lockNode.pos, symTable.nilType, Names.NIL_VALUE);
BType nillableError = BUnionType.create(null, symTable.errorType, symTable.nilType);
BLangStatementExpression statementExpression = createStatementExpression(lockNode.body, nilLiteral);
statementExpression.type = symTable.nilType;
BLangTrapExpr trapExpr = (BLangTrapExpr) TreeBuilder.createTrapExpressionNode();
trapExpr.type = nillableError;
trapExpr.expr = statementExpression;
BVarSymbol nillableErrorVarSymbol = new BVarSymbol(0, names.fromString("$errorResult"),
this.env.scope.owner.pkgID, nillableError,
this.env.scope.owner, lockNode.pos, VIRTUAL);
BLangSimpleVariable simpleVariable = ASTBuilderUtil.createVariable(lockNode.pos, "$errorResult",
nillableError, trapExpr,
nillableErrorVarSymbol);
BLangSimpleVariableDef simpleVariableDef = ASTBuilderUtil.createVariableDef(lockNode.pos, simpleVariable);
blockStmt.addStatement(simpleVariableDef);
BLangUnLockStmt unLockStmt = new BLangUnLockStmt(lockNode.pos);
unLockStmt.relatedLock = lockStmt;
blockStmt.addStatement(unLockStmt);
BLangSimpleVarRef varRef = ASTBuilderUtil.createVariableRef(lockNode.pos, nillableErrorVarSymbol);
BLangBlockStmt ifBody = ASTBuilderUtil.createBlockStmt(lockNode.pos);
BLangPanic panicNode = (BLangPanic) TreeBuilder.createPanicNode();
panicNode.pos = lockNode.pos;
panicNode.expr = addConversionExprIfRequired(varRef, symTable.errorType);
ifBody.addStatement(panicNode);
BLangTypeTestExpr isErrorTest =
ASTBuilderUtil.createTypeTestExpr(lockNode.pos, varRef, getErrorTypeNode());
isErrorTest.type = symTable.booleanType;
BLangIf ifelse = ASTBuilderUtil.createIfElseStmt(lockNode.pos, isErrorTest, ifBody, null);
blockStmt.addStatement(ifelse);
result = rewrite(blockStmt, env);
enclLocks.pop();
this.onFailClause = currentOnFailClause;
this.onFailCallFuncDef = currentOnFailCallDef;
}
@Override
public void visit(BLangLockStmt lockStmt) {
result = lockStmt;
}
@Override
public void visit(BLangUnLockStmt unLockStmt) {
result = unLockStmt;
}
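    // Builds the compiler-generated on-fail clause for a transaction block: it rolls back the
    // transaction if needed and then either re-panics the caught error (when $shouldPanic$ is
    // set) or propagates it via a fail statement.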
private BLangOnFailClause createTrxInternalOnFail(Location pos, BLangSimpleVarRef shouldPanicRef) {
BLangOnFailClause trxOnFailClause = (BLangOnFailClause) TreeBuilder.createOnFailClauseNode();
trxOnFailClause.pos = pos;
trxOnFailClause.body = ASTBuilderUtil.createBlockStmt(pos);
trxOnFailClause.body.scope = new Scope(env.scope.owner);
trxOnFailClause.isInternal = true;
BVarSymbol trxOnFailErrorSym = new BVarSymbol(0, names.fromString("$trxError$"),
env.scope.owner.pkgID, symTable.errorType, env.scope.owner, pos, VIRTUAL);
BLangSimpleVariable trxOnFailError = ASTBuilderUtil.createVariable(pos,
"$trxError$", symTable.errorType, null, trxOnFailErrorSym);
trxOnFailClause.variableDefinitionNode = ASTBuilderUtil.createVariableDef(pos,
trxOnFailError);
trxOnFailClause.body.scope.define(trxOnFailErrorSym.name, trxOnFailErrorSym);
transactionDesugar.createRollbackIfFailed(pos, trxOnFailClause.body, trxOnFailErrorSym, trxBlockId);
BLangGroupExpr shouldNotPanic = new BLangGroupExpr();
shouldNotPanic.type = symTable.booleanType;
shouldNotPanic.expression = createNotBinaryExpression(pos, shouldPanicRef);
BLangSimpleVarRef caughtError = ASTBuilderUtil.createVariableRef(pos, trxOnFailErrorSym);
BLangBlockStmt failBlock = ASTBuilderUtil.createBlockStmt(pos);
BLangPanic panicNode = (BLangPanic) TreeBuilder.createPanicNode();
panicNode.pos = pos;
panicNode.expr = caughtError;
BLangIf exitIf = ASTBuilderUtil.createIfElseStmt(pos, shouldNotPanic, failBlock, panicNode);
trxOnFailClause.body.stmts.add(exitIf);
BLangFail failStmt = (BLangFail) TreeBuilder.createFailNode();
failStmt.pos = pos;
failStmt.expr = caughtError;
failBlock.stmts.add(failStmt);
trxOnFailClause.bodyContainsFail = true;
return trxOnFailClause;
}
@Override
public void visit(BLangTransaction transactionNode) {
if (transactionNode.onFailClause != null) {
BLangOnFailClause onFailClause = transactionNode.onFailClause;
transactionNode.onFailClause = null;
transactionNode.transactionBody.isBreakable = false;
BLangDo doStmt = wrapStatementWithinDo(transactionNode.pos, transactionNode, onFailClause);
result = rewrite(doStmt, env);
} else {
BLangLiteral currentTrxBlockId = this.trxBlockId;
String uniqueId = String.valueOf(++transactionBlockCount);
this.trxBlockId = ASTBuilderUtil.createLiteral(transactionNode.pos, symTable.stringType, uniqueId);
boolean currShouldReturnErrors = this.shouldReturnErrors;
this.shouldReturnErrors = true;
BLangOnFailClause currOnFailClause = this.onFailClause;
BLangSimpleVariableDef currOnFailCallDef = this.onFailCallFuncDef;
BLangLiteral falseLiteral = ASTBuilderUtil.createLiteral(transactionNode.pos, symTable.booleanType, false);
BVarSymbol shouldPanicVarSymbol = new BVarSymbol(0, names.fromString("$shouldPanic$"),
env.scope.owner.pkgID, symTable.booleanType, this.env.scope.owner, transactionNode.pos, VIRTUAL);
shouldPanicVarSymbol.closure = true;
BLangSimpleVariable shouldPanicVariable = ASTBuilderUtil.createVariable(transactionNode.pos,
"$shouldPanic$", symTable.booleanType, falseLiteral, shouldPanicVarSymbol);
BLangSimpleVariableDef shouldPanicDef = ASTBuilderUtil.createVariableDef(transactionNode.pos,
shouldPanicVariable);
BLangSimpleVarRef shouldPanicRef = ASTBuilderUtil.createVariableRef(transactionNode.pos,
shouldPanicVarSymbol);
BLangOnFailClause trxInternalOnFail = createTrxInternalOnFail(transactionNode.pos, shouldPanicRef);
enclosingShouldPanic.put(trxInternalOnFail, shouldPanicRef);
boolean userDefinedOnFailAvbl = this.onFailClause != null;
analyzeOnFailClause(trxInternalOnFail, transactionNode.transactionBody);
BLangBlockStmt transactionStmtBlock =
transactionDesugar.rewrite(transactionNode, trxBlockId, env, uniqueId);
transactionStmtBlock.stmts.add(0, shouldPanicDef);
transactionStmtBlock.scope.define(shouldPanicVarSymbol.name, shouldPanicVarSymbol);
transactionStmtBlock.isBreakable = !userDefinedOnFailAvbl;
result = rewrite(transactionStmtBlock, this.env);
this.shouldReturnErrors = currShouldReturnErrors;
this.trxBlockId = currentTrxBlockId;
swapAndResetEnclosingOnFail(currOnFailClause, currOnFailCallDef);
}
}
@Override
public void visit(BLangRollback rollbackNode) {
BLangBlockStmt rollbackStmtExpr = transactionDesugar.desugar(rollbackNode, trxBlockId);
result = rewrite(rollbackStmtExpr, env);
}
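    // Builds the compiler-generated on-fail clause for a retry block: the caught error is stored
    // as the retry result, the retry manager is consulted via shouldRetry(), and when no further
    // retry is allowed the error is either failed onwards (inside an enclosing on-fail) or the
    // loop is flagged to return the error.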
private BLangOnFailClause createRetryInternalOnFail(Location pos,
BLangSimpleVarRef retryResultRef,
BLangSimpleVarRef retryManagerRef,
BLangSimpleVarRef shouldRetryRef,
BLangSimpleVarRef continueLoopRef,
BLangSimpleVarRef returnResult,
boolean shouldRollback) {
BLangOnFailClause internalOnFail = (BLangOnFailClause) TreeBuilder.createOnFailClauseNode();
internalOnFail.pos = pos;
internalOnFail.body = ASTBuilderUtil.createBlockStmt(pos);
internalOnFail.body.scope = new Scope(env.scope.owner);
BVarSymbol caughtErrorSym = new BVarSymbol(0, names.fromString("$caughtError$"),
env.scope.owner.pkgID, symTable.errorType, env.scope.owner, pos, VIRTUAL);
BLangSimpleVariable caughtError = ASTBuilderUtil.createVariable(pos,
"$caughtError$", symTable.errorType, null, caughtErrorSym);
internalOnFail.variableDefinitionNode = ASTBuilderUtil.createVariableDef(pos,
caughtError);
env.scope.define(caughtErrorSym.name, caughtErrorSym);
BLangSimpleVarRef caughtErrorRef = ASTBuilderUtil.createVariableRef(pos, caughtErrorSym);
BLangAssignment errorAssignment = ASTBuilderUtil.createAssignmentStmt(pos, retryResultRef, caughtErrorRef);
internalOnFail.body.stmts.add(errorAssignment);
BLangAssignment continueLoopTrue = ASTBuilderUtil.createAssignmentStmt(pos, continueLoopRef,
ASTBuilderUtil.createLiteral(pos, symTable.booleanType, true));
internalOnFail.body.stmts.add(continueLoopTrue);
if (shouldRollback) {
transactionDesugar.createRollbackIfFailed(pos, internalOnFail.body, caughtErrorSym, trxBlockId);
}
BLangInvocation shouldRetryInvocation = createRetryManagerShouldRetryInvocation(pos,
retryManagerRef, caughtErrorRef);
BLangAssignment shouldRetryAssignment = ASTBuilderUtil.createAssignmentStmt(pos, shouldRetryRef,
shouldRetryInvocation);
internalOnFail.body.stmts.add(shouldRetryAssignment);
BLangGroupExpr shouldNotRetryCheck = new BLangGroupExpr();
shouldNotRetryCheck.type = symTable.booleanType;
shouldNotRetryCheck.expression = createNotBinaryExpression(pos, shouldRetryRef);
BLangGroupExpr exitCheck = new BLangGroupExpr();
exitCheck.type = symTable.booleanType;
exitCheck.expression = shouldNotRetryCheck;
BLangBlockStmt exitLogicBlock = ASTBuilderUtil.createBlockStmt(pos);
BLangIf exitIf = ASTBuilderUtil.createIfElseStmt(pos, exitCheck, exitLogicBlock, null);
if (this.onFailClause != null) {
BLangFail failStmt = (BLangFail) TreeBuilder.createFailNode();
failStmt.pos = pos;
failStmt.expr = retryResultRef;
exitLogicBlock.stmts.add(failStmt);
internalOnFail.bodyContainsFail = true;
internalOnFail.body.stmts.add(exitIf);
BLangContinue loopContinueStmt = (BLangContinue) TreeBuilder.createContinueNode();
loopContinueStmt.pos = pos;
internalOnFail.body.stmts.add(loopContinueStmt);
} else {
BLangAssignment returnErrorTrue = ASTBuilderUtil.createAssignmentStmt(pos, returnResult,
ASTBuilderUtil.createLiteral(pos, symTable.booleanType, true));
exitLogicBlock.stmts.add(returnErrorTrue);
internalOnFail.body.stmts.add(exitIf);
}
return internalOnFail;
}
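/**
 * Despite the name, this builds a logical NOT (!) unary expression over the given boolean
 * expression, resolving the built-in NOT operator symbol from the root package.
 */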
BLangUnaryExpr createNotBinaryExpression(Location pos, BLangExpression expression) {
List<BType> paramTypes = new ArrayList<>();
paramTypes.add(symTable.booleanType);
BInvokableType type = new BInvokableType(paramTypes, symTable.booleanType,
null);
BOperatorSymbol notOperatorSymbol = new BOperatorSymbol(
names.fromString(OperatorKind.NOT.value()), symTable.rootPkgSymbol.pkgID, type, symTable.rootPkgSymbol,
symTable.builtinPos, VIRTUAL);
return ASTBuilderUtil.createUnaryExpr(pos, expression, symTable.booleanType,
OperatorKind.NOT, notOperatorSymbol);
}
BLangLambdaFunction createLambdaFunction(Location pos, String functionNamePrefix,
List<BLangSimpleVariable> lambdaFunctionVariable,
TypeNode returnType, BLangFunctionBody lambdaBody) {
BLangLambdaFunction lambdaFunction = (BLangLambdaFunction) TreeBuilder.createLambdaFunctionNode();
BLangFunction func =
ASTBuilderUtil.createFunction(pos, functionNamePrefix + UNDERSCORE + lambdaFunctionCount++);
lambdaFunction.function = func;
func.requiredParams.addAll(lambdaFunctionVariable);
func.setReturnTypeNode(returnType);
func.desugaredReturnType = true;
defineFunction(func, env.enclPkg);
lambdaFunctionVariable = func.requiredParams;
func.body = lambdaBody;
func.desugared = false;
lambdaFunction.pos = pos;
List<BType> paramTypes = new ArrayList<>();
lambdaFunctionVariable.forEach(variable -> paramTypes.add(variable.symbol.type));
lambdaFunction.type = new BInvokableType(paramTypes, func.symbol.type.getReturnType(),
null);
return lambdaFunction;
}
protected BLangLambdaFunction createLambdaFunction(Location pos, String functionNamePrefix,
List<BLangSimpleVariable> lambdaFunctionVariable,
TypeNode returnType, List<BLangStatement> fnBodyStmts,
SymbolEnv env, Scope bodyScope) {
BLangBlockFunctionBody body = (BLangBlockFunctionBody) TreeBuilder.createBlockFunctionBodyNode();
body.scope = bodyScope;
SymbolEnv bodyEnv = SymbolEnv.createFuncBodyEnv(body, env);
this.forceCastReturnType = ((BLangType) returnType).type;
body.stmts = rewriteStmt(fnBodyStmts, bodyEnv);
this.forceCastReturnType = null;
return createLambdaFunction(pos, functionNamePrefix, lambdaFunctionVariable, returnType, body);
}
private BLangLambdaFunction createLambdaFunction(Location pos, String functionNamePrefix,
TypeNode returnType) {
BLangLambdaFunction lambdaFunction = (BLangLambdaFunction) TreeBuilder.createLambdaFunctionNode();
BLangFunction func =
ASTBuilderUtil.createFunction(pos, functionNamePrefix + UNDERSCORE + lambdaFunctionCount++);
lambdaFunction.function = func;
func.setReturnTypeNode(returnType);
func.desugaredReturnType = true;
defineFunction(func, env.enclPkg);
func.desugared = false;
lambdaFunction.pos = pos;
return lambdaFunction;
}
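/**
 * Defines the given function in the target package's symbol space and registers it as a
 * top-level node so that subsequent compiler phases pick it up.
 */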
private void defineFunction(BLangFunction funcNode, BLangPackage targetPkg) {
final BPackageSymbol packageSymbol = targetPkg.symbol;
final SymbolEnv packageEnv = this.symTable.pkgEnvMap.get(packageSymbol);
symbolEnter.defineNode(funcNode, packageEnv);
packageEnv.enclPkg.functions.add(funcNode);
packageEnv.enclPkg.topLevelNodes.add(funcNode);
}
@Override
public void visit(BLangForkJoin forkJoin) {
result = forkJoin;
}
@Override
public void visit(BLangLiteral literalExpr) {
if (literalExpr.type.tag == TypeTags.ARRAY && ((BArrayType) literalExpr.type).eType.tag == TypeTags.BYTE) {
result = rewriteBlobLiteral(literalExpr);
return;
}
result = literalExpr;
}
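/**
 * Rewrites a blob literal (e.g. base16 `AB CD` or base64 `aGVsbG8=`) into an array literal of
 * byte values, decoding the payload at compile time.
 */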
private BLangNode rewriteBlobLiteral(BLangLiteral literalExpr) {
String[] result = getBlobTextValue((String) literalExpr.value);
byte[] values;
if (BASE_64.equals(result[0])) {
values = Base64.getDecoder().decode(result[1].getBytes(StandardCharsets.UTF_8));
} else {
values = hexStringToByteArray(result[1]);
}
BLangArrayLiteral arrayLiteralNode = (BLangArrayLiteral) TreeBuilder.createArrayLiteralExpressionNode();
arrayLiteralNode.type = literalExpr.type;
arrayLiteralNode.pos = literalExpr.pos;
arrayLiteralNode.exprs = new ArrayList<>();
for (byte b : values) {
arrayLiteralNode.exprs.add(createByteLiteral(literalExpr.pos, b));
}
return arrayLiteralNode;
}
private String[] getBlobTextValue(String blobLiteralNodeText) {
String nodeText = blobLiteralNodeText.replaceAll(" ", "");
String[] result = new String[2];
result[0] = nodeText.substring(0, nodeText.indexOf('`'));
result[1] = nodeText.substring(nodeText.indexOf('`') + 1, nodeText.lastIndexOf('`'));
return result;
}
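/**
 * Converts a base16 string (e.g. "ABCD") into the corresponding byte array, consuming two hex
 * digits per byte.
 */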
private static byte[] hexStringToByteArray(String str) {
int len = str.length();
byte[] data = new byte[len / 2];
for (int i = 0; i < len; i += 2) {
data[i / 2] = (byte) ((Character.digit(str.charAt(i), 16) << 4) + Character.digit(str.charAt(i + 1), 16));
}
return data;
}
@Override
public void visit(BLangListConstructorExpr listConstructor) {
listConstructor.exprs = rewriteExprs(listConstructor.exprs);
BLangExpression expr;
if (listConstructor.type.tag == TypeTags.TUPLE) {
expr = new BLangTupleLiteral(listConstructor.pos, listConstructor.exprs, listConstructor.type);
result = rewriteExpr(expr);
} else if (listConstructor.type.tag == TypeTags.JSON) {
expr = new BLangJSONArrayLiteral(listConstructor.exprs, new BArrayType(listConstructor.type));
result = rewriteExpr(expr);
} else if (getElementType(listConstructor.type).tag == TypeTags.JSON) {
expr = new BLangJSONArrayLiteral(listConstructor.exprs, listConstructor.type);
result = rewriteExpr(expr);
} else if (listConstructor.type.tag == TypeTags.TYPEDESC) {
final BLangTypedescExpr typedescExpr = new BLangTypedescExpr();
typedescExpr.resolvedType = listConstructor.typedescType;
typedescExpr.type = symTable.typeDesc;
result = rewriteExpr(typedescExpr);
} else {
expr = new BLangArrayLiteral(listConstructor.pos, listConstructor.exprs, listConstructor.type);
result = rewriteExpr(expr);
}
}
@Override
public void visit(BLangTableConstructorExpr tableConstructorExpr) {
rewriteExprs(tableConstructorExpr.recordLiteralList);
result = tableConstructorExpr;
}
@Override
public void visit(BLangArrayLiteral arrayLiteral) {
arrayLiteral.exprs = rewriteExprs(arrayLiteral.exprs);
if (arrayLiteral.type.tag == TypeTags.JSON) {
result = new BLangJSONArrayLiteral(arrayLiteral.exprs, new BArrayType(arrayLiteral.type));
return;
} else if (getElementType(arrayLiteral.type).tag == TypeTags.JSON) {
result = new BLangJSONArrayLiteral(arrayLiteral.exprs, arrayLiteral.type);
return;
}
result = arrayLiteral;
}
@Override
public void visit(BLangTupleLiteral tupleLiteral) {
if (tupleLiteral.isTypedescExpr) {
final BLangTypedescExpr typedescExpr = new BLangTypedescExpr();
typedescExpr.resolvedType = tupleLiteral.typedescType;
typedescExpr.type = symTable.typeDesc;
result = rewriteExpr(typedescExpr);
return;
}
tupleLiteral.exprs.forEach(expr -> {
BType expType = expr.impConversionExpr == null ? expr.type : expr.impConversionExpr.type;
types.setImplicitCastExpr(expr, expType, symTable.anyType);
});
tupleLiteral.exprs = rewriteExprs(tupleLiteral.exprs);
result = tupleLiteral;
}
@Override
public void visit(BLangGroupExpr groupExpr) {
if (groupExpr.isTypedescExpr) {
final BLangTypedescExpr typedescExpr = new BLangTypedescExpr();
typedescExpr.resolvedType = groupExpr.typedescType;
typedescExpr.type = symTable.typeDesc;
result = rewriteExpr(typedescExpr);
} else {
result = rewriteExpr(groupExpr.expression);
}
}
@Override
public void visit(BLangRecordLiteral recordLiteral) {
List<RecordLiteralNode.RecordField> fields = recordLiteral.fields;
fields.sort((v1, v2) -> Boolean.compare(isComputedKey(v1), isComputedKey(v2)));
result = rewriteExpr(rewriteMappingConstructor(recordLiteral));
}
@Override
public void visit(BLangSimpleVarRef varRefExpr) {
BLangSimpleVarRef genVarRefExpr = varRefExpr;
if (varRefExpr.pkgSymbol != null && varRefExpr.pkgSymbol.tag == SymTag.XMLNS) {
BLangXMLQName qnameExpr = new BLangXMLQName(varRefExpr.variableName);
qnameExpr.nsSymbol = (BXMLNSSymbol) varRefExpr.pkgSymbol;
qnameExpr.localname = varRefExpr.variableName;
qnameExpr.prefix = varRefExpr.pkgAlias;
qnameExpr.namespaceURI = qnameExpr.nsSymbol.namespaceURI;
qnameExpr.isUsedInXML = false;
qnameExpr.pos = varRefExpr.pos;
qnameExpr.type = symTable.stringType;
result = qnameExpr;
return;
}
if (varRefExpr.symbol == null) {
result = varRefExpr;
return;
}
if ((varRefExpr.symbol.tag & SymTag.VARIABLE) == SymTag.VARIABLE) {
BVarSymbol varSymbol = (BVarSymbol) varRefExpr.symbol;
if (varSymbol.originalSymbol != null) {
varRefExpr.symbol = varSymbol.originalSymbol;
}
}
BSymbol ownerSymbol = varRefExpr.symbol.owner;
if ((varRefExpr.symbol.tag & SymTag.FUNCTION) == SymTag.FUNCTION &&
varRefExpr.symbol.type.tag == TypeTags.INVOKABLE) {
genVarRefExpr = new BLangFunctionVarRef((BVarSymbol) varRefExpr.symbol);
} else if ((varRefExpr.symbol.tag & SymTag.TYPE) == SymTag.TYPE &&
!((varRefExpr.symbol.tag & SymTag.CONSTANT) == SymTag.CONSTANT)) {
genVarRefExpr = new BLangTypeLoad(varRefExpr.symbol);
} else if ((ownerSymbol.tag & SymTag.INVOKABLE) == SymTag.INVOKABLE ||
(ownerSymbol.tag & SymTag.LET) == SymTag.LET) {
genVarRefExpr = new BLangLocalVarRef((BVarSymbol) varRefExpr.symbol);
} else if ((ownerSymbol.tag & SymTag.STRUCT) == SymTag.STRUCT) {
genVarRefExpr = new BLangFieldVarRef((BVarSymbol) varRefExpr.symbol);
} else if ((ownerSymbol.tag & SymTag.PACKAGE) == SymTag.PACKAGE ||
(ownerSymbol.tag & SymTag.SERVICE) == SymTag.SERVICE) {
if ((varRefExpr.symbol.tag & SymTag.CONSTANT) == SymTag.CONSTANT) {
BConstantSymbol constSymbol = (BConstantSymbol) varRefExpr.symbol;
if (constSymbol.literalType.tag <= TypeTags.BOOLEAN || constSymbol.literalType.tag == TypeTags.NIL) {
BLangLiteral literal = ASTBuilderUtil.createLiteral(varRefExpr.pos, constSymbol.literalType,
constSymbol.value.value);
result = rewriteExpr(addConversionExprIfRequired(literal, varRefExpr.type));
return;
}
}
genVarRefExpr = new BLangPackageVarRef((BVarSymbol) varRefExpr.symbol);
if (!enclLocks.isEmpty()) {
BVarSymbol symbol = (BVarSymbol) varRefExpr.symbol;
BLangLockStmt lockStmt = enclLocks.peek();
lockStmt.addLockVariable(symbol);
lockStmt.addLockVariable(this.globalVariablesDependsOn.getOrDefault(symbol, new HashSet<>()));
}
}
genVarRefExpr.type = varRefExpr.type;
genVarRefExpr.pos = varRefExpr.pos;
if ((varRefExpr.lhsVar)
|| genVarRefExpr.symbol.name.equals(IGNORE)) {
genVarRefExpr.lhsVar = varRefExpr.lhsVar;
genVarRefExpr.type = varRefExpr.symbol.type;
result = genVarRefExpr;
return;
}
genVarRefExpr.lhsVar = varRefExpr.lhsVar;
BType targetType = genVarRefExpr.type;
genVarRefExpr.type = genVarRefExpr.symbol.type;
BLangExpression expression = addConversionExprIfRequired(genVarRefExpr, targetType);
result = expression.impConversionExpr != null ? expression.impConversionExpr : expression;
}
@Override
public void visit(BLangFieldBasedAccess fieldAccessExpr) {
if (safeNavigate(fieldAccessExpr)) {
result = rewriteExpr(rewriteSafeNavigationExpr(fieldAccessExpr));
return;
}
BLangAccessExpression targetVarRef = fieldAccessExpr;
BType varRefType = fieldAccessExpr.expr.type;
fieldAccessExpr.expr = rewriteExpr(fieldAccessExpr.expr);
if (!types.isSameType(fieldAccessExpr.expr.type, varRefType)) {
fieldAccessExpr.expr = addConversionExprIfRequired(fieldAccessExpr.expr, varRefType);
}
BLangLiteral stringLit = createStringLiteral(fieldAccessExpr.field.pos,
StringEscapeUtils.unescapeJava(fieldAccessExpr.field.value));
int varRefTypeTag = varRefType.tag;
if (varRefTypeTag == TypeTags.OBJECT ||
(varRefTypeTag == TypeTags.UNION &&
((BUnionType) varRefType).getMemberTypes().iterator().next().tag == TypeTags.OBJECT)) {
if (fieldAccessExpr.symbol != null && fieldAccessExpr.symbol.type.tag == TypeTags.INVOKABLE &&
((fieldAccessExpr.symbol.flags & Flags.ATTACHED) == Flags.ATTACHED)) {
result = rewriteObjectMemberAccessAsField(fieldAccessExpr);
return;
} else {
boolean isStoreOnCreation = fieldAccessExpr.isStoreOnCreation;
if (!isStoreOnCreation && varRefTypeTag == TypeTags.OBJECT && env.enclInvokable != null) {
BInvokableSymbol originalFuncSymbol = ((BLangFunction) env.enclInvokable).originalFuncSymbol;
BObjectTypeSymbol objectTypeSymbol = (BObjectTypeSymbol) varRefType.tsymbol;
BAttachedFunction initializerFunc = objectTypeSymbol.initializerFunc;
BAttachedFunction generatedInitializerFunc = objectTypeSymbol.generatedInitializerFunc;
if ((generatedInitializerFunc != null && originalFuncSymbol == generatedInitializerFunc.symbol) ||
(initializerFunc != null && originalFuncSymbol == initializerFunc.symbol)) {
isStoreOnCreation = true;
}
}
targetVarRef = new BLangStructFieldAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit,
(BVarSymbol) fieldAccessExpr.symbol, false,
isStoreOnCreation);
}
} else if (varRefTypeTag == TypeTags.RECORD ||
(varRefTypeTag == TypeTags.UNION &&
((BUnionType) varRefType).getMemberTypes().iterator().next().tag == TypeTags.RECORD)) {
if (fieldAccessExpr.symbol != null && fieldAccessExpr.symbol.type.tag == TypeTags.INVOKABLE
&& ((fieldAccessExpr.symbol.flags & Flags.ATTACHED) == Flags.ATTACHED)) {
targetVarRef = new BLangStructFunctionVarRef(fieldAccessExpr.expr, (BVarSymbol) fieldAccessExpr.symbol);
} else {
targetVarRef = new BLangStructFieldAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit,
(BVarSymbol) fieldAccessExpr.symbol, false, fieldAccessExpr.isStoreOnCreation);
}
} else if (types.isLax(varRefType)) {
if (!(varRefType.tag == TypeTags.XML || varRefType.tag == TypeTags.XML_ELEMENT)) {
if (varRefType.tag == TypeTags.MAP && TypeTags.isXMLTypeTag(((BMapType) varRefType).constraint.tag)) {
result = rewriteExpr(rewriteLaxMapAccess(fieldAccessExpr));
return;
}
fieldAccessExpr.expr = addConversionExprIfRequired(fieldAccessExpr.expr, symTable.jsonType);
targetVarRef = new BLangJSONAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit);
} else {
targetVarRef = rewriteXMLAttributeOrElemNameAccess(fieldAccessExpr);
}
} else if (varRefTypeTag == TypeTags.MAP) {
targetVarRef = new BLangMapAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit,
fieldAccessExpr.isStoreOnCreation);
} else if (TypeTags.isXMLTypeTag(varRefTypeTag)) {
targetVarRef = new BLangXMLAccessExpr(fieldAccessExpr.pos, fieldAccessExpr.expr, stringLit,
fieldAccessExpr.fieldKind);
}
targetVarRef.lhsVar = fieldAccessExpr.lhsVar;
targetVarRef.type = fieldAccessExpr.type;
targetVarRef.optionalFieldAccess = fieldAccessExpr.optionalFieldAccess;
result = targetVarRef;
}
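/**
 * Rewrites access to an attached object method as a field into a synthetic lambda that closes
 * over the receiver and delegates to the original method, so the result can be used as a
 * function pointer. If the receiver is not a simple variable reference, it is first bound to a
 * temporary variable and the lambda is wrapped in a statement expression.
 */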
private BLangNode rewriteObjectMemberAccessAsField(BLangFieldBasedAccess fieldAccessExpr) {
Location pos = fieldAccessExpr.pos;
BInvokableSymbol originalMemberFuncSymbol = (BInvokableSymbol) fieldAccessExpr.symbol;
BLangFunction func = (BLangFunction) TreeBuilder.createFunctionNode();
String funcName = "$annon$method$delegate$" + lambdaFunctionCount++;
BInvokableSymbol funcSymbol = new BInvokableSymbol(SymTag.INVOKABLE, (Flags.ANONYMOUS | Flags.LAMBDA),
names.fromString(funcName),
env.enclPkg.packageID, originalMemberFuncSymbol.type, env.scope.owner, pos, VIRTUAL);
funcSymbol.retType = originalMemberFuncSymbol.retType;
funcSymbol.bodyExist = true;
funcSymbol.params = new ArrayList<>();
funcSymbol.scope = new Scope(funcSymbol);
func.pos = pos;
func.name = createIdentifier(pos, funcName);
func.flagSet.add(Flag.LAMBDA);
func.flagSet.add(Flag.ANONYMOUS);
func.body = (BLangBlockFunctionBody) TreeBuilder.createBlockFunctionBodyNode();
func.symbol = funcSymbol;
func.type = funcSymbol.type;
func.closureVarSymbols = new LinkedHashSet<>();
BLangExpression receiver = fieldAccessExpr.expr;
BLangSimpleVariableDef intermediateObjDef = null;
if (receiver.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
BSymbol receiverSymbol = ((BLangVariableReference) receiver).symbol;
receiverSymbol.closure = true;
func.closureVarSymbols.add(new ClosureVarSymbol(receiverSymbol, pos));
} else {
BLangSimpleVariableDef varDef = createVarDef("$$temp$obj$" + annonVarCount++, receiver.type, receiver, pos);
intermediateObjDef = varDef;
varDef.var.symbol.closure = true;
env.scope.define(varDef.var.symbol.name, varDef.var.symbol);
BLangSimpleVarRef variableRef = createVariableRef(pos, varDef.var.symbol);
func.closureVarSymbols.add(new ClosureVarSymbol(varDef.var.symbol, pos));
receiver = variableRef;
}
ArrayList<BLangExpression> requiredArgs = new ArrayList<>();
for (BVarSymbol param : originalMemberFuncSymbol.params) {
BLangSimpleVariable fParam = (BLangSimpleVariable) TreeBuilder.createSimpleVariableNode();
fParam.symbol = new BVarSymbol(0, param.name, env.enclPkg.packageID, param.type, funcSymbol, pos,
VIRTUAL);
fParam.pos = pos;
fParam.name = createIdentifier(pos, param.name.value);
fParam.type = param.type;
func.requiredParams.add(fParam);
funcSymbol.params.add(fParam.symbol);
funcSymbol.scope.define(fParam.symbol.name, fParam.symbol);
BLangSimpleVarRef paramRef = createVariableRef(pos, fParam.symbol);
requiredArgs.add(paramRef);
}
ArrayList<BLangExpression> restArgs = new ArrayList<>();
if (originalMemberFuncSymbol.restParam != null) {
BLangSimpleVariable restParam = (BLangSimpleVariable) TreeBuilder.createSimpleVariableNode();
func.restParam = restParam;
BVarSymbol restSym = originalMemberFuncSymbol.restParam;
restParam.name = ASTBuilderUtil.createIdentifier(pos, restSym.name.value);
restParam.symbol = new BVarSymbol(0, restSym.name, env.enclPkg.packageID, restSym.type, funcSymbol, pos,
VIRTUAL);
restParam.pos = pos;
restParam.type = restSym.type;
funcSymbol.restParam = restParam.symbol;
funcSymbol.scope.define(restParam.symbol.name, restParam.symbol);
BLangSimpleVarRef restArg = createVariableRef(pos, restParam.symbol);
BLangRestArgsExpression restArgExpr = new BLangRestArgsExpression();
restArgExpr.expr = restArg;
restArgExpr.pos = pos;
restArgExpr.type = restSym.type;
restArgExpr.expectedType = restArgExpr.type;
restArgs.add(restArgExpr);
}
BLangIdentifier field = fieldAccessExpr.field;
BLangReturn retStmt = (BLangReturn) TreeBuilder.createReturnNode();
retStmt.expr = createObjectMethodInvocation(
receiver, field, fieldAccessExpr.symbol, requiredArgs, restArgs);
((BLangBlockFunctionBody) func.body).addStatement(retStmt);
BLangLambdaFunction lambdaFunction = (BLangLambdaFunction) TreeBuilder.createLambdaFunctionNode();
lambdaFunction.function = func;
lambdaFunction.capturedClosureEnv = env.createClone();
env.enclPkg.functions.add(func);
env.enclPkg.topLevelNodes.add(func);
lambdaFunction.parent = env.enclInvokable;
lambdaFunction.type = func.type;
if (intermediateObjDef == null) {
return rewrite(lambdaFunction, env);
} else {
BLangStatementExpression expr = createStatementExpression(intermediateObjDef, rewrite(lambdaFunction, env));
expr.type = lambdaFunction.type;
return rewrite(expr, env);
}
}
private BLangInvocation createObjectMethodInvocation(BLangExpression receiver, BLangIdentifier field,
BSymbol invocableSymbol,
List<BLangExpression> requiredArgs,
List<BLangExpression> restArgs) {
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
invocationNode.name = field;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.expr = receiver;
invocationNode.symbol = invocableSymbol;
invocationNode.type = ((BInvokableType) invocableSymbol.type).retType;
invocationNode.requiredArgs = requiredArgs;
invocationNode.restArgs = restArgs;
return invocationNode;
}
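/**
 * Desugars lax field access on a map of XML values (e.g. `m.foo` where `m` is a `map<xml>`) into
 * a statement expression: the map is indexed with the field name, and when the key is absent an
 * InvalidKey error is assigned to the result; otherwise the looked-up value is.
 */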
private BLangStatementExpression rewriteLaxMapAccess(BLangFieldBasedAccess fieldAccessExpr) {
BLangStatementExpression statementExpression = new BLangStatementExpression();
BLangBlockStmt block = new BLangBlockStmt();
statementExpression.stmt = block;
BUnionType fieldAccessType = BUnionType.create(null, fieldAccessExpr.type, symTable.errorType);
Location pos = fieldAccessExpr.pos;
BLangSimpleVariableDef result = createVarDef("$mapAccessResult$", fieldAccessType, null, pos);
block.addStatement(result);
BLangSimpleVarRef resultRef = ASTBuilderUtil.createVariableRef(pos, result.var.symbol);
resultRef.type = fieldAccessType;
statementExpression.type = fieldAccessType;
BLangLiteral mapIndex = ASTBuilderUtil.createLiteral(
fieldAccessExpr.field.pos, symTable.stringType, fieldAccessExpr.field.value);
BLangMapAccessExpr mapAccessExpr = new BLangMapAccessExpr(pos, fieldAccessExpr.expr, mapIndex);
BUnionType xmlOrNil = BUnionType.create(null, fieldAccessExpr.type, symTable.nilType);
mapAccessExpr.type = xmlOrNil;
BLangSimpleVariableDef mapResult = createVarDef("$mapAccess", xmlOrNil, mapAccessExpr, pos);
BLangSimpleVarRef mapResultRef = ASTBuilderUtil.createVariableRef(pos, mapResult.var.symbol);
block.addStatement(mapResult);
BLangIf ifStmt = ASTBuilderUtil.createIfStmt(pos, block);
BLangIsLikeExpr isLikeNilExpr = createIsLikeExpression(pos, mapResultRef, symTable.nilType);
ifStmt.expr = isLikeNilExpr;
BLangBlockStmt resultNilBody = new BLangBlockStmt();
ifStmt.body = resultNilBody;
BLangBlockStmt resultHasValueBody = new BLangBlockStmt();
ifStmt.elseStmt = resultHasValueBody;
BLangErrorConstructorExpr errorConstructorExpr =
(BLangErrorConstructorExpr) TreeBuilder.createErrorConstructorExpressionNode();
BSymbol symbol = symResolver.lookupMainSpaceSymbolInPackage(errorConstructorExpr.pos, env,
names.fromString(""), names.fromString("error"));
errorConstructorExpr.type = symbol.type;
List<BLangExpression> positionalArgs = new ArrayList<>();
List<BLangNamedArgsExpression> namedArgs = new ArrayList<>();
positionalArgs.add(createStringLiteral(pos, "{" + RuntimeConstants.MAP_LANG_LIB + "}InvalidKey"));
BLangNamedArgsExpression message = new BLangNamedArgsExpression();
message.name = ASTBuilderUtil.createIdentifier(pos, "key");
message.expr = createStringLiteral(pos, fieldAccessExpr.field.value);
namedArgs.add(message);
errorConstructorExpr.positionalArgs = positionalArgs;
errorConstructorExpr.namedArgs = namedArgs;
BLangSimpleVariableDef errorDef =
createVarDef("$_invalid_key_error", symTable.errorType, errorConstructorExpr, pos);
resultNilBody.addStatement(errorDef);
BLangSimpleVarRef errorRef = ASTBuilderUtil.createVariableRef(pos, errorDef.var.symbol);
BLangAssignment errorVarAssignment = ASTBuilderUtil.createAssignmentStmt(pos, resultNilBody);
errorVarAssignment.varRef = resultRef;
errorVarAssignment.expr = errorRef;
BLangAssignment mapResultAssignment = ASTBuilderUtil.createAssignmentStmt(
pos, resultHasValueBody);
mapResultAssignment.varRef = resultRef;
mapResultAssignment.expr = mapResultRef;
statementExpression.expr = resultRef;
return statementExpression;
}
private BLangAccessExpression rewriteXMLAttributeOrElemNameAccess(BLangFieldBasedAccess fieldAccessExpr) {
ArrayList<BLangExpression> args = new ArrayList<>();
String fieldName = fieldAccessExpr.field.value;
if (fieldAccessExpr.fieldKind == FieldKind.WITH_NS) {
BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess nsPrefixAccess =
(BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess) fieldAccessExpr;
fieldName = createExpandedQName(nsPrefixAccess.nsSymbol.namespaceURI, fieldName);
}
if (fieldName.equals("_")) {
return createLanglibXMLInvocation(fieldAccessExpr.pos, XML_INTERNAL_GET_ELEMENT_NAME_NIL_LIFTING,
fieldAccessExpr.expr, new ArrayList<>(), new ArrayList<>());
}
BLangLiteral attributeNameLiteral = createStringLiteral(fieldAccessExpr.field.pos, fieldName);
args.add(attributeNameLiteral);
args.add(isOptionalAccessToLiteral(fieldAccessExpr));
return createLanglibXMLInvocation(fieldAccessExpr.pos, XML_INTERNAL_GET_ATTRIBUTE, fieldAccessExpr.expr, args,
new ArrayList<>());
}
private BLangExpression isOptionalAccessToLiteral(BLangFieldBasedAccess fieldAccessExpr) {
return rewrite(
createLiteral(fieldAccessExpr.pos, symTable.booleanType, fieldAccessExpr.isOptionalFieldAccess()), env);
}
private String createExpandedQName(String nsURI, String localName) {
return "{" + nsURI + "}" + localName;
}
@Override
public void visit(BLangIndexBasedAccess indexAccessExpr) {
if (safeNavigate(indexAccessExpr)) {
result = rewriteExpr(rewriteSafeNavigationExpr(indexAccessExpr));
return;
}
BLangIndexBasedAccess targetVarRef = indexAccessExpr;
indexAccessExpr.indexExpr = rewriteExpr(indexAccessExpr.indexExpr);
BType varRefType = indexAccessExpr.expr.type;
indexAccessExpr.expr = rewriteExpr(indexAccessExpr.expr);
if (!types.isSameType(indexAccessExpr.expr.type, varRefType)) {
indexAccessExpr.expr = addConversionExprIfRequired(indexAccessExpr.expr, varRefType);
}
if (varRefType.tag == TypeTags.MAP) {
targetVarRef = new BLangMapAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr, indexAccessExpr.isStoreOnCreation);
} else if (types.isSubTypeOfMapping(types.getSafeType(varRefType, true, false))) {
targetVarRef = new BLangStructFieldAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr,
(BVarSymbol) indexAccessExpr.symbol, false);
} else if (types.isSubTypeOfList(varRefType)) {
targetVarRef = new BLangArrayAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr);
} else if (types.isAssignable(varRefType, symTable.stringType)) {
indexAccessExpr.expr = addConversionExprIfRequired(indexAccessExpr.expr, symTable.stringType);
targetVarRef = new BLangStringAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr);
} else if (TypeTags.isXMLTypeTag(varRefType.tag)) {
targetVarRef = new BLangXMLAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr);
} else if (varRefType.tag == TypeTags.TABLE) {
if (targetVarRef.indexExpr.getKind() == NodeKind.TABLE_MULTI_KEY) {
BLangTupleLiteral listConstructorExpr = new BLangTupleLiteral();
listConstructorExpr.exprs = ((BLangTableMultiKeyExpr) indexAccessExpr.indexExpr).multiKeyIndexExprs;
List<BType> memberTypes = new ArrayList<>();
((BLangTableMultiKeyExpr) indexAccessExpr.indexExpr).multiKeyIndexExprs.
forEach(expression -> memberTypes.add(expression.type));
listConstructorExpr.type = new BTupleType(memberTypes);
indexAccessExpr.indexExpr = listConstructorExpr;
}
targetVarRef = new BLangTableAccessExpr(indexAccessExpr.pos, indexAccessExpr.expr,
indexAccessExpr.indexExpr);
}
targetVarRef.lhsVar = indexAccessExpr.lhsVar;
targetVarRef.type = indexAccessExpr.type;
result = targetVarRef;
}
@Override
public void visit(BLangTableMultiKeyExpr tableMultiKeyExpr) {
rewriteExprs(tableMultiKeyExpr.multiKeyIndexExprs);
result = tableMultiKeyExpr;
}
@Override
public void visit(BLangInvocation iExpr) {
rewriteInvocation(iExpr, false);
}
@Override
public void visit(BLangErrorConstructorExpr errorConstructorExpr) {
if (errorConstructorExpr.positionalArgs.size() == 1) {
errorConstructorExpr.positionalArgs.add(createNilLiteral());
}
errorConstructorExpr.positionalArgs.set(1,
addConversionExprIfRequired(errorConstructorExpr.positionalArgs.get(1), symTable.errorType));
rewriteExprs(errorConstructorExpr.positionalArgs);
BLangExpression errorDetail;
BLangRecordLiteral recordLiteral = ASTBuilderUtil.createEmptyRecordLiteral(errorConstructorExpr.pos,
((BErrorType) errorConstructorExpr.type).detailType);
if (errorConstructorExpr.namedArgs.isEmpty()) {
errorDetail = visitCloneReadonly(rewriteExpr(recordLiteral), recordLiteral.type);
} else {
for (BLangNamedArgsExpression namedArg : errorConstructorExpr.namedArgs) {
BLangRecordLiteral.BLangRecordKeyValueField member = new BLangRecordLiteral.BLangRecordKeyValueField();
member.key = new BLangRecordLiteral.BLangRecordKey(ASTBuilderUtil.createLiteral(namedArg.name.pos,
symTable.stringType, namedArg.name.value));
if (recordLiteral.type.tag == TypeTags.RECORD) {
member.valueExpr = addConversionExprIfRequired(namedArg.expr, symTable.anyType);
} else {
member.valueExpr = addConversionExprIfRequired(namedArg.expr, namedArg.expr.type);
}
recordLiteral.fields.add(member);
}
errorDetail = visitCloneReadonly(rewriteExpr(recordLiteral),
((BErrorType) errorConstructorExpr.type).detailType);
}
errorConstructorExpr.errorDetail = errorDetail;
result = errorConstructorExpr;
}
@Override
public void visit(BLangInvocation.BLangActionInvocation actionInvocation) {
if (!actionInvocation.async && actionInvocation.invokedInsideTransaction) {
transactionDesugar.startTransactionCoordinatorOnce(env, actionInvocation.pos);
}
rewriteInvocation(actionInvocation, actionInvocation.async);
}
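/**
 * Common invocation rewrite: records lock dependencies, reorders and rewrites arguments,
 * dispatches function-pointer invocations, converts object/record method calls into attached
 * function invocations, and fixes type-param related casts on the return value.
 */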
private void rewriteInvocation(BLangInvocation invocation, boolean async) {
BLangInvocation invRef = invocation;
if (!enclLocks.isEmpty()) {
BLangLockStmt lock = enclLocks.peek();
lock.lockVariables.addAll(((BInvokableSymbol) invocation.symbol).dependentGlobalVars);
}
reorderArguments(invocation);
invocation.requiredArgs = rewriteExprs(invocation.requiredArgs);
fixNonRestArgTypeCastInTypeParamInvocation(invocation);
invocation.restArgs = rewriteExprs(invocation.restArgs);
annotationDesugar.defineStatementAnnotations(invocation.annAttachments, invocation.pos,
invocation.symbol.pkgID, invocation.symbol.owner, env);
if (invocation.functionPointerInvocation) {
visitFunctionPointerInvocation(invocation);
return;
}
invocation.expr = rewriteExpr(invocation.expr);
result = invRef;
BInvokableSymbol invSym = (BInvokableSymbol) invocation.symbol;
if (Symbols.isFlagOn(invSym.retType.flags, Flags.PARAMETERIZED)) {
BType retType = typeBuilder.build(invSym.retType);
invocation.type = retType;
}
if (invocation.expr == null) {
fixTypeCastInTypeParamInvocation(invocation, invRef);
if (invocation.exprSymbol == null) {
return;
}
invocation.expr = ASTBuilderUtil.createVariableRef(invocation.pos, invocation.exprSymbol);
invocation.expr = rewriteExpr(invocation.expr);
}
switch (invocation.expr.type.tag) {
case TypeTags.OBJECT:
case TypeTags.RECORD:
if (!invocation.langLibInvocation) {
List<BLangExpression> argExprs = new ArrayList<>(invocation.requiredArgs);
argExprs.add(0, invocation.expr);
BLangAttachedFunctionInvocation attachedFunctionInvocation =
new BLangAttachedFunctionInvocation(invocation.pos, argExprs, invocation.restArgs,
invocation.symbol, invocation.type, invocation.expr,
async);
attachedFunctionInvocation.name = invocation.name;
attachedFunctionInvocation.annAttachments = invocation.annAttachments;
result = invRef = attachedFunctionInvocation;
}
break;
}
fixTypeCastInTypeParamInvocation(invocation, invRef);
}
private void fixNonRestArgTypeCastInTypeParamInvocation(BLangInvocation iExpr) {
if (!iExpr.langLibInvocation) {
return;
}
List<BLangExpression> requiredArgs = iExpr.requiredArgs;
List<BVarSymbol> params = ((BInvokableSymbol) iExpr.symbol).params;
for (int i = 1; i < requiredArgs.size(); i++) {
requiredArgs.set(i, addConversionExprIfRequired(requiredArgs.get(i), params.get(i).type));
}
}
/* This function is a workaround and needs improvement.
* Notes for improvement:
* 1. Both arguments are the same.
* 2. Due to the current type param logic, the type param flag is put on the original type.
* 3. For an error type having the Cloneable type with the type param flag, this code changes the
*    expression type.
* 4. Using the error type is a problem since Cloneable is a type param (e.g. ExprBodiedFunctionTest);
*    `never` was added to the CloneableType type param:
* @typeParam type
* CloneableType Cloneable|never;
*/
private void fixTypeCastInTypeParamInvocation(BLangInvocation iExpr, BLangInvocation genIExpr) {
var returnTypeOfInvokable = ((BInvokableSymbol) iExpr.symbol).retType;
if (iExpr.langLibInvocation || TypeParamAnalyzer.containsTypeParam(returnTypeOfInvokable)) {
BType originalInvType = genIExpr.type;
if (!genIExpr.async) {
genIExpr.type = returnTypeOfInvokable;
}
BLangExpression expr = addConversionExprIfRequired(genIExpr, originalInvType);
if (expr.getKind() == NodeKind.TYPE_CONVERSION_EXPR) {
this.result = expr;
return;
}
BLangTypeConversionExpr conversionExpr = (BLangTypeConversionExpr) TreeBuilder.createTypeConversionNode();
conversionExpr.expr = genIExpr;
conversionExpr.targetType = originalInvType;
conversionExpr.type = originalInvType;
conversionExpr.pos = genIExpr.pos;
this.result = conversionExpr;
}
}
private BLangLiteral createNilLiteral() {
BLangLiteral literal = (BLangLiteral) TreeBuilder.createLiteralExpression();
literal.value = null;
literal.type = symTable.nilType;
return literal;
}
@Override
public void visit(BLangTypeInit typeInitExpr) {
if (typeInitExpr.type.tag == TypeTags.STREAM) {
result = rewriteExpr(desugarStreamTypeInit(typeInitExpr));
} else {
result = rewrite(desugarObjectTypeInit(typeInitExpr), env);
}
}
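/**
 * Desugars an object `new T(...)` expression into a block statement expression: declare the
 * object variable, invoke the generated init function, and, when init may return an error,
 * branch on the init result so the expression yields either the object or the error.
 */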
private BLangStatementExpression desugarObjectTypeInit(BLangTypeInit typeInitExpr) {
typeInitExpr.desugared = true;
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(typeInitExpr.pos);
BType objType = getObjectType(typeInitExpr.type);
BLangSimpleVariableDef objVarDef = createVarDef("$obj$", objType, typeInitExpr, typeInitExpr.pos);
objVarDef.var.name.pos = symTable.builtinPos;
BLangSimpleVarRef objVarRef = ASTBuilderUtil.createVariableRef(typeInitExpr.pos, objVarDef.var.symbol);
blockStmt.addStatement(objVarDef);
typeInitExpr.initInvocation.exprSymbol = objVarDef.var.symbol;
typeInitExpr.initInvocation.symbol = ((BObjectTypeSymbol) objType.tsymbol).generatedInitializerFunc.symbol;
if (typeInitExpr.initInvocation.type.tag == TypeTags.NIL) {
BLangExpressionStmt initInvExpr = ASTBuilderUtil.createExpressionStmt(typeInitExpr.pos, blockStmt);
initInvExpr.expr = typeInitExpr.initInvocation;
typeInitExpr.initInvocation.name.value = Names.GENERATED_INIT_SUFFIX.value;
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt, objVarRef);
stmtExpr.type = objVarRef.symbol.type;
return stmtExpr;
}
BLangSimpleVariableDef initInvRetValVarDef = createVarDef("$temp$", typeInitExpr.initInvocation.type,
typeInitExpr.initInvocation, typeInitExpr.pos);
blockStmt.addStatement(initInvRetValVarDef);
BLangSimpleVariableDef resultVarDef = createVarDef("$result$", typeInitExpr.type, null, typeInitExpr.pos);
blockStmt.addStatement(resultVarDef);
BLangSimpleVarRef initRetValVarRefInCondition =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, initInvRetValVarDef.var.symbol);
BLangBlockStmt thenStmt = ASTBuilderUtil.createBlockStmt(symTable.builtinPos);
BLangTypeTestExpr isErrorTest =
ASTBuilderUtil.createTypeTestExpr(symTable.builtinPos, initRetValVarRefInCondition, getErrorTypeNode());
isErrorTest.type = symTable.booleanType;
BLangSimpleVarRef thenInitRetValVarRef =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, initInvRetValVarDef.var.symbol);
BLangSimpleVarRef thenResultVarRef =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, resultVarDef.var.symbol);
BLangAssignment errAssignment =
ASTBuilderUtil.createAssignmentStmt(symTable.builtinPos, thenResultVarRef, thenInitRetValVarRef);
thenStmt.addStatement(errAssignment);
BLangSimpleVarRef elseResultVarRef =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, resultVarDef.var.symbol);
BLangAssignment objAssignment =
ASTBuilderUtil.createAssignmentStmt(symTable.builtinPos, elseResultVarRef, objVarRef);
BLangBlockStmt elseStmt = ASTBuilderUtil.createBlockStmt(symTable.builtinPos);
elseStmt.addStatement(objAssignment);
BLangIf ifelse = ASTBuilderUtil.createIfElseStmt(symTable.builtinPos, isErrorTest, thenStmt, elseStmt);
blockStmt.addStatement(ifelse);
BLangSimpleVarRef resultVarRef =
ASTBuilderUtil.createVariableRef(symTable.builtinPos, resultVarDef.var.symbol);
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt, resultVarRef);
stmtExpr.type = resultVarRef.symbol.type;
return stmtExpr;
}
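/**
 * Desugars a stream `new` expression into an invocation of the internal stream construction
 * function, passing the constraint typedesc plus the iterator argument, if one was given.
 */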
private BLangInvocation desugarStreamTypeInit(BLangTypeInit typeInitExpr) {
BInvokableSymbol symbol = (BInvokableSymbol) symTable.langInternalModuleSymbol.scope
.lookup(Names.CONSTRUCT_STREAM).symbol;
BType targetType = ((BStreamType) typeInitExpr.type).constraint;
BType errorType = ((BStreamType) typeInitExpr.type).error;
BType typedescType = new BTypedescType(targetType, symTable.typeDesc.tsymbol);
BLangTypedescExpr typedescExpr = new BLangTypedescExpr();
typedescExpr.resolvedType = targetType;
typedescExpr.type = typedescType;
List<BLangExpression> args = new ArrayList<>(Lists.of(typedescExpr));
if (!typeInitExpr.argsExpr.isEmpty()) {
args.add(typeInitExpr.argsExpr.get(0));
}
BLangInvocation streamConstructInvocation = ASTBuilderUtil.createInvocationExprForMethod(
typeInitExpr.pos, symbol, args, symResolver);
streamConstructInvocation.type = new BStreamType(TypeTags.STREAM, targetType, errorType, null);
return streamConstructInvocation;
}
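/**
 * Creates a variable definition for the given name, type and initializer, reusing an existing
 * symbol from the current scope when one is found and creating a fresh one otherwise.
 */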
private BLangSimpleVariableDef createVarDef(String name, BType type, BLangExpression expr,
Location location) {
BSymbol objSym = symResolver.lookupSymbolInMainSpace(env, names.fromString(name));
if (objSym == null || objSym == symTable.notFoundSymbol) {
objSym = new BVarSymbol(0, names.fromString(name), this.env.scope.owner.pkgID, type,
this.env.scope.owner, location, VIRTUAL);
}
BLangSimpleVariable objVar = ASTBuilderUtil.createVariable(location, name, type, expr, (BVarSymbol) objSym);
BLangSimpleVariableDef objVarDef = ASTBuilderUtil.createVariableDef(location);
objVarDef.var = objVar;
objVarDef.type = objVar.type;
return objVarDef;
}
private BType getObjectType(BType type) {
if (type.tag == TypeTags.OBJECT) {
return type;
} else if (type.tag == TypeTags.UNION) {
return ((BUnionType) type).getMemberTypes().stream()
.filter(t -> t.tag == TypeTags.OBJECT)
.findFirst()
.orElse(symTable.noType);
}
throw new IllegalStateException("None object type '" + type.toString() + "' found in object init context");
}
BLangErrorType getErrorTypeNode() {
BLangErrorType errorTypeNode = (BLangErrorType) TreeBuilder.createErrorTypeNode();
errorTypeNode.type = symTable.errorType;
errorTypeNode.pos = symTable.builtinPos;
return errorTypeNode;
}
BLangErrorType getErrorOrNillTypeNode() {
BLangErrorType errorTypeNode = (BLangErrorType) TreeBuilder.createErrorTypeNode();
errorTypeNode.type = symTable.errorOrNilType;
return errorTypeNode;
}
@Override
public void visit(BLangTernaryExpr ternaryExpr) {
/*
* First desugar to an if-else:
*
* T $result$;
* if (expr) {
*     $result$ = thenExpr;
* } else {
*     $result$ = elseExpr;
* }
*/
BLangSimpleVariableDef resultVarDef = createVarDef("$ternary_result$", ternaryExpr.type, null, ternaryExpr.pos);
BLangBlockStmt thenBody = ASTBuilderUtil.createBlockStmt(ternaryExpr.pos);
BLangBlockStmt elseBody = ASTBuilderUtil.createBlockStmt(ternaryExpr.pos);
BLangSimpleVarRef thenResultVarRef = ASTBuilderUtil.createVariableRef(ternaryExpr.pos, resultVarDef.var.symbol);
BLangAssignment thenAssignment =
ASTBuilderUtil.createAssignmentStmt(ternaryExpr.pos, thenResultVarRef, ternaryExpr.thenExpr);
thenBody.addStatement(thenAssignment);
BLangSimpleVarRef elseResultVarRef = ASTBuilderUtil.createVariableRef(ternaryExpr.pos, resultVarDef.var.symbol);
BLangAssignment elseAssignment =
ASTBuilderUtil.createAssignmentStmt(ternaryExpr.pos, elseResultVarRef, ternaryExpr.elseExpr);
elseBody.addStatement(elseAssignment);
BLangSimpleVarRef resultVarRef = ASTBuilderUtil.createVariableRef(ternaryExpr.pos, resultVarDef.var.symbol);
BLangIf ifElse = ASTBuilderUtil.createIfElseStmt(ternaryExpr.pos, ternaryExpr.expr, thenBody, elseBody);
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(ternaryExpr.pos, Lists.of(resultVarDef, ifElse));
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt, resultVarRef);
stmtExpr.type = ternaryExpr.type;
result = rewriteExpr(stmtExpr);
}
@Override
public void visit(BLangWaitExpr waitExpr) {
if (waitExpr.getExpression().getKind() == NodeKind.BINARY_EXPR) {
waitExpr.exprList = collectAllBinaryExprs((BLangBinaryExpr) waitExpr.getExpression(), new ArrayList<>());
} else {
waitExpr.exprList = Collections.singletonList(rewriteExpr(waitExpr.getExpression()));
}
result = waitExpr;
}
private List<BLangExpression> collectAllBinaryExprs(BLangBinaryExpr binaryExpr, List<BLangExpression> exprs) {
visitBinaryExprOfWait(binaryExpr.lhsExpr, exprs);
visitBinaryExprOfWait(binaryExpr.rhsExpr, exprs);
return exprs;
}
private void visitBinaryExprOfWait(BLangExpression expr, List<BLangExpression> exprs) {
if (expr.getKind() == NodeKind.BINARY_EXPR) {
collectAllBinaryExprs((BLangBinaryExpr) expr, exprs);
} else {
expr = rewriteExpr(expr);
exprs.add(expr);
}
}
@Override
public void visit(BLangWaitForAllExpr waitExpr) {
waitExpr.keyValuePairs.forEach(keyValue -> {
if (keyValue.valueExpr != null) {
keyValue.valueExpr = rewriteExpr(keyValue.valueExpr);
} else {
keyValue.keyExpr = rewriteExpr(keyValue.keyExpr);
}
});
BLangExpression expr = new BLangWaitForAllExpr.BLangWaitLiteral(waitExpr.keyValuePairs, waitExpr.type);
result = rewriteExpr(expr);
}
@Override
public void visit(BLangTrapExpr trapExpr) {
trapExpr.expr = rewriteExpr(trapExpr.expr);
if (trapExpr.expr.type.tag != TypeTags.NIL) {
trapExpr.expr = addConversionExprIfRequired(trapExpr.expr, trapExpr.type);
}
result = trapExpr;
}
@Override
public void visit(BLangBinaryExpr binaryExpr) {
if (binaryExpr.opKind == OperatorKind.HALF_OPEN_RANGE || binaryExpr.opKind == OperatorKind.CLOSED_RANGE) {
if (binaryExpr.opKind == OperatorKind.HALF_OPEN_RANGE) {
binaryExpr.rhsExpr = getModifiedIntRangeEndExpr(binaryExpr.rhsExpr);
}
result = rewriteExpr(replaceWithIntRange(binaryExpr.pos, binaryExpr.lhsExpr, binaryExpr.rhsExpr));
return;
}
if (binaryExpr.opKind == OperatorKind.AND || binaryExpr.opKind == OperatorKind.OR) {
visitBinaryLogicalExpr(binaryExpr);
return;
}
OperatorKind binaryOpKind = binaryExpr.opKind;
if (binaryOpKind == OperatorKind.ADD || binaryOpKind == OperatorKind.SUB ||
binaryOpKind == OperatorKind.MUL || binaryOpKind == OperatorKind.DIV ||
binaryOpKind == OperatorKind.MOD || binaryOpKind == OperatorKind.BITWISE_AND ||
binaryOpKind == OperatorKind.BITWISE_OR || binaryOpKind == OperatorKind.BITWISE_XOR) {
checkByteTypeIncompatibleOperations(binaryExpr);
}
binaryExpr.lhsExpr = rewriteExpr(binaryExpr.lhsExpr);
binaryExpr.rhsExpr = rewriteExpr(binaryExpr.rhsExpr);
result = binaryExpr;
int rhsExprTypeTag = binaryExpr.rhsExpr.type.tag;
int lhsExprTypeTag = binaryExpr.lhsExpr.type.tag;
if (rhsExprTypeTag != lhsExprTypeTag && (binaryExpr.opKind == OperatorKind.EQUAL ||
binaryExpr.opKind == OperatorKind.NOT_EQUAL ||
binaryExpr.opKind == OperatorKind.REF_EQUAL ||
binaryExpr.opKind == OperatorKind.REF_NOT_EQUAL)) {
if (lhsExprTypeTag == TypeTags.INT && rhsExprTypeTag == TypeTags.BYTE) {
binaryExpr.rhsExpr = createTypeCastExpr(binaryExpr.rhsExpr, symTable.intType);
return;
}
if (lhsExprTypeTag == TypeTags.BYTE && rhsExprTypeTag == TypeTags.INT) {
binaryExpr.lhsExpr = createTypeCastExpr(binaryExpr.lhsExpr, symTable.intType);
return;
}
}
if (lhsExprTypeTag == rhsExprTypeTag) {
return;
}
if (TypeTags.isStringTypeTag(lhsExprTypeTag) && binaryExpr.opKind == OperatorKind.ADD) {
if (TypeTags.isXMLTypeTag(rhsExprTypeTag)) {
binaryExpr.lhsExpr = ASTBuilderUtil.createXMLTextLiteralNode(binaryExpr, binaryExpr.lhsExpr,
binaryExpr.lhsExpr.pos, symTable.xmlType);
return;
}
binaryExpr.rhsExpr = createTypeCastExpr(binaryExpr.rhsExpr, binaryExpr.lhsExpr.type);
return;
}
if (TypeTags.isStringTypeTag(rhsExprTypeTag) && binaryExpr.opKind == OperatorKind.ADD) {
if (TypeTags.isXMLTypeTag(lhsExprTypeTag)) {
binaryExpr.rhsExpr = ASTBuilderUtil.createXMLTextLiteralNode(binaryExpr, binaryExpr.rhsExpr,
binaryExpr.rhsExpr.pos, symTable.xmlType);
return;
}
binaryExpr.lhsExpr = createTypeCastExpr(binaryExpr.lhsExpr, binaryExpr.rhsExpr.type);
return;
}
if (lhsExprTypeTag == TypeTags.DECIMAL) {
binaryExpr.rhsExpr = createTypeCastExpr(binaryExpr.rhsExpr, binaryExpr.lhsExpr.type);
return;
}
if (rhsExprTypeTag == TypeTags.DECIMAL) {
binaryExpr.lhsExpr = createTypeCastExpr(binaryExpr.lhsExpr, binaryExpr.rhsExpr.type);
return;
}
if (lhsExprTypeTag == TypeTags.FLOAT) {
binaryExpr.rhsExpr = createTypeCastExpr(binaryExpr.rhsExpr, binaryExpr.lhsExpr.type);
return;
}
if (rhsExprTypeTag == TypeTags.FLOAT) {
binaryExpr.lhsExpr = createTypeCastExpr(binaryExpr.lhsExpr, binaryExpr.rhsExpr.type);
}
}
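/**
 * Replaces a range expression (`a ... b` or `a ..< b`) with a call to the internal int-range
 * construction function, which yields an iterable int range object.
 */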
private BLangInvocation replaceWithIntRange(Location location, BLangExpression lhsExpr,
BLangExpression rhsExpr) {
BInvokableSymbol symbol = (BInvokableSymbol) symTable.langInternalModuleSymbol.scope
.lookup(Names.CREATE_INT_RANGE).symbol;
BLangInvocation createIntRangeInvocation = ASTBuilderUtil.createInvocationExprForMethod(location, symbol,
new ArrayList<>(Lists.of(lhsExpr, rhsExpr)), symResolver);
createIntRangeInvocation.type = symTable.intRangeType;
return createIntRangeInvocation;
}
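/**
 * When a binary arithmetic or bitwise operation mixes byte operands but the expected result type
 * is int, widens the byte operand(s) to int so both sides agree.
 */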
private void checkByteTypeIncompatibleOperations(BLangBinaryExpr binaryExpr) {
if (binaryExpr.expectedType == null) {
return;
}
int rhsExprTypeTag = binaryExpr.rhsExpr.type.tag;
int lhsExprTypeTag = binaryExpr.lhsExpr.type.tag;
if (rhsExprTypeTag != TypeTags.BYTE && lhsExprTypeTag != TypeTags.BYTE) {
return;
}
int resultTypeTag = binaryExpr.expectedType.tag;
if (resultTypeTag == TypeTags.INT) {
if (rhsExprTypeTag == TypeTags.BYTE) {
binaryExpr.rhsExpr = addConversionExprIfRequired(binaryExpr.rhsExpr, symTable.intType);
}
if (lhsExprTypeTag == TypeTags.BYTE) {
binaryExpr.lhsExpr = addConversionExprIfRequired(binaryExpr.lhsExpr, symTable.intType);
}
}
}
/**
* This method checks whether the given binary expression is a bitwise shift operation.
* If it is, both the lhs and rhs of the binary expression will be converted to the 'int' type.
* <p>
* byte a = 12;
* byte b = 34;
* int i = 234;
* int j = -4;
* <p>
* true: where the binary expression's expected type is 'int'
* int i1 = a >> b;
* int i2 = a << b;
* int i3 = a >> i;
* int i4 = a << i;
* int i5 = i >> j;
* int i6 = i << j;
*/
private boolean isBitwiseShiftOperation(BLangBinaryExpr binaryExpr) {
return binaryExpr.opKind == OperatorKind.BITWISE_LEFT_SHIFT ||
binaryExpr.opKind == OperatorKind.BITWISE_RIGHT_SHIFT ||
binaryExpr.opKind == OperatorKind.BITWISE_UNSIGNED_RIGHT_SHIFT;
}
@Override
public void visit(BLangElvisExpr elvisExpr) {
BLangMatchExpression matchExpr = ASTBuilderUtil.createMatchExpression(elvisExpr.lhsExpr);
matchExpr.patternClauses.add(getMatchNullPatternGivenExpression(elvisExpr.pos,
rewriteExpr(elvisExpr.rhsExpr)));
matchExpr.type = elvisExpr.type;
matchExpr.pos = elvisExpr.pos;
result = rewriteExpr(matchExpr);
}
@Override
public void visit(BLangUnaryExpr unaryExpr) {
if (OperatorKind.BITWISE_COMPLEMENT == unaryExpr.operator) {
rewriteBitwiseComplementOperator(unaryExpr);
return;
}
unaryExpr.expr = rewriteExpr(unaryExpr.expr);
result = unaryExpr;
}
/**
* This method desugars a bitwise complement (~) unary expression into a bitwise xor binary expression as below.
* Example : ~a -> a ^ -1;
* ~ 11110011 -> 00001100
* 11110011 ^ 11111111 -> 00001100
*
* @param unaryExpr the bitwise complement expression
*/
private void rewriteBitwiseComplementOperator(BLangUnaryExpr unaryExpr) {
final Location pos = unaryExpr.pos;
final BLangBinaryExpr binaryExpr = (BLangBinaryExpr) TreeBuilder.createBinaryExpressionNode();
binaryExpr.pos = pos;
binaryExpr.opKind = OperatorKind.BITWISE_XOR;
binaryExpr.lhsExpr = unaryExpr.expr;
if (TypeTags.BYTE == unaryExpr.type.tag) {
binaryExpr.type = symTable.byteType;
binaryExpr.rhsExpr = ASTBuilderUtil.createLiteral(pos, symTable.byteType, 0xffL);
binaryExpr.opSymbol = (BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.BITWISE_XOR,
symTable.byteType, symTable.byteType);
} else {
binaryExpr.type = symTable.intType;
binaryExpr.rhsExpr = ASTBuilderUtil.createLiteral(pos, symTable.intType, -1L);
binaryExpr.opSymbol = (BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.BITWISE_XOR,
symTable.intType, symTable.intType);
}
result = rewriteExpr(binaryExpr);
}
@Override
public void visit(BLangTypeConversionExpr conversionExpr) {
if (conversionExpr.typeNode == null && !conversionExpr.annAttachments.isEmpty()) {
result = rewriteExpr(conversionExpr.expr);
return;
}
conversionExpr.typeNode = rewrite(conversionExpr.typeNode, env);
if (types.isXMLExprCastableToString(conversionExpr.expr.type, conversionExpr.type)) {
result = convertXMLTextToString(conversionExpr);
return;
}
conversionExpr.expr = rewriteExpr(conversionExpr.expr);
result = conversionExpr;
}
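/**
 * Converts an xml:Text value to a string by invoking the XML langlib's text-content getter and
 * wrapping the result in a statement expression via a temporary variable definition.
 */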
private BLangExpression convertXMLTextToString(BLangTypeConversionExpr conversionExpr) {
BLangInvocation invocationNode = createLanglibXMLInvocation(conversionExpr.pos, XML_GET_CONTENT_OF_TEXT,
conversionExpr.expr, new ArrayList<>(), new ArrayList<>());
BLangSimpleVariableDef tempVarDef = createVarDef("$$__xml_string__$$",
conversionExpr.targetType, invocationNode, conversionExpr.pos);
BLangSimpleVarRef tempVarRef = ASTBuilderUtil.createVariableRef(conversionExpr.pos, tempVarDef.var.symbol);
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(conversionExpr.pos);
blockStmt.addStatement(tempVarDef);
BLangStatementExpression stmtExpr = ASTBuilderUtil.createStatementExpression(blockStmt, tempVarRef);
stmtExpr.type = conversionExpr.type;
return rewrite(stmtExpr, env);
}
@Override
public void visit(BLangLambdaFunction bLangLambdaFunction) {
env.enclPkg.lambdaFunctions.add(bLangLambdaFunction);
result = bLangLambdaFunction;
}
@Override
public void visit(BLangArrowFunction bLangArrowFunction) {
BLangFunction bLangFunction = (BLangFunction) TreeBuilder.createFunctionNode();
bLangFunction.setName(bLangArrowFunction.functionName);
BLangLambdaFunction lambdaFunction = (BLangLambdaFunction) TreeBuilder.createLambdaFunctionNode();
lambdaFunction.pos = bLangArrowFunction.pos;
bLangFunction.addFlag(Flag.LAMBDA);
lambdaFunction.function = bLangFunction;
BLangValueType returnType = (BLangValueType) TreeBuilder.createValueTypeNode();
returnType.type = bLangArrowFunction.body.expr.type;
bLangFunction.setReturnTypeNode(returnType);
bLangFunction.setBody(populateArrowExprBodyBlock(bLangArrowFunction));
bLangArrowFunction.params.forEach(bLangFunction::addParameter);
lambdaFunction.parent = bLangArrowFunction.parent;
lambdaFunction.type = bLangArrowFunction.funcType;
BLangFunction funcNode = lambdaFunction.function;
BInvokableSymbol funcSymbol = Symbols.createFunctionSymbol(Flags.asMask(funcNode.flagSet),
new Name(funcNode.name.value),
env.enclPkg.symbol.pkgID,
bLangArrowFunction.funcType,
env.enclEnv.enclVarSym, true,
bLangArrowFunction.pos, VIRTUAL);
SymbolEnv invokableEnv = SymbolEnv.createFunctionEnv(funcNode, funcSymbol.scope, env);
defineInvokableSymbol(funcNode, funcSymbol, invokableEnv);
List<BVarSymbol> paramSymbols = funcNode.requiredParams.stream().peek(varNode -> {
Scope enclScope = invokableEnv.scope;
varNode.symbol.kind = SymbolKind.FUNCTION;
varNode.symbol.owner = invokableEnv.scope.owner;
enclScope.define(varNode.symbol.name, varNode.symbol);
}).map(varNode -> varNode.symbol).collect(Collectors.toList());
funcSymbol.params = paramSymbols;
funcSymbol.restParam = getRestSymbol(funcNode);
funcSymbol.retType = funcNode.returnTypeNode.type;
List<BType> paramTypes = paramSymbols.stream().map(paramSym -> paramSym.type).collect(Collectors.toList());
funcNode.type = new BInvokableType(paramTypes, getRestType(funcSymbol), funcNode.returnTypeNode.type, null);
lambdaFunction.function.pos = bLangArrowFunction.pos;
lambdaFunction.function.body.pos = bLangArrowFunction.pos;
lambdaFunction.capturedClosureEnv = env;
rewrite(lambdaFunction.function, env);
env.enclPkg.addFunction(lambdaFunction.function);
bLangArrowFunction.function = lambdaFunction.function;
result = rewriteExpr(lambdaFunction);
}
private void defineInvokableSymbol(BLangInvokableNode invokableNode, BInvokableSymbol funcSymbol,
SymbolEnv invokableEnv) {
invokableNode.symbol = funcSymbol;
funcSymbol.scope = new Scope(funcSymbol);
invokableEnv.scope = funcSymbol.scope;
}
@Override
public void visit(BLangXMLQName xmlQName) {
result = xmlQName;
}
@Override
public void visit(BLangXMLAttribute xmlAttribute) {
xmlAttribute.name = rewriteExpr(xmlAttribute.name);
xmlAttribute.value = rewriteExpr(xmlAttribute.value);
result = xmlAttribute;
}
@Override
public void visit(BLangXMLElementLiteral xmlElementLiteral) {
xmlElementLiteral.startTagName = rewriteExpr(xmlElementLiteral.startTagName);
xmlElementLiteral.endTagName = rewriteExpr(xmlElementLiteral.endTagName);
xmlElementLiteral.modifiedChildren = rewriteExprs(xmlElementLiteral.modifiedChildren);
xmlElementLiteral.attributes = rewriteExprs(xmlElementLiteral.attributes);
Iterator<BLangXMLAttribute> attributesItr = xmlElementLiteral.attributes.iterator();
while (attributesItr.hasNext()) {
BLangXMLAttribute attribute = attributesItr.next();
if (!attribute.isNamespaceDeclr) {
continue;
}
BLangXMLNS xmlns;
if ((xmlElementLiteral.scope.owner.tag & SymTag.PACKAGE) == SymTag.PACKAGE) {
xmlns = new BLangPackageXMLNS();
} else {
xmlns = new BLangLocalXMLNS();
}
xmlns.namespaceURI = attribute.value.concatExpr;
xmlns.prefix = ((BLangXMLQName) attribute.name).localname;
xmlns.symbol = attribute.symbol;
xmlElementLiteral.inlineNamespaces.add(xmlns);
}
result = xmlElementLiteral;
}
@Override
public void visit(BLangXMLTextLiteral xmlTextLiteral) {
xmlTextLiteral.concatExpr = rewriteExpr(constructStringTemplateConcatExpression(xmlTextLiteral.textFragments));
result = xmlTextLiteral;
}
@Override
public void visit(BLangXMLCommentLiteral xmlCommentLiteral) {
xmlCommentLiteral.concatExpr = rewriteExpr(
constructStringTemplateConcatExpression(xmlCommentLiteral.textFragments));
result = xmlCommentLiteral;
}
@Override
public void visit(BLangXMLProcInsLiteral xmlProcInsLiteral) {
xmlProcInsLiteral.target = rewriteExpr(xmlProcInsLiteral.target);
xmlProcInsLiteral.dataConcatExpr =
rewriteExpr(constructStringTemplateConcatExpression(xmlProcInsLiteral.dataFragments));
result = xmlProcInsLiteral;
}
@Override
public void visit(BLangXMLQuotedString xmlQuotedString) {
xmlQuotedString.concatExpr = rewriteExpr(
constructStringTemplateConcatExpression(xmlQuotedString.textFragments));
result = xmlQuotedString;
}
@Override
public void visit(BLangStringTemplateLiteral stringTemplateLiteral) {
result = rewriteExpr(constructStringTemplateConcatExpression(stringTemplateLiteral.exprs));
}
/**
* The raw template literal gets desugared to a type init expression. For each literal, a new object class type
* def is generated from the object type. The type init expression creates an instance of this generated object
* type. For example, consider the following statements:
* string name = "Pubudu";
* 'object:RawTemplate rt = `Hello ${name}!`;
*
* The raw template literal above is desugared to:
* type RawTemplate$Impl$0 object {
* public string[] strings = ["Hello ", "!"];
* public (any|error)[] insertions;
*
* function init((any|error)[] insertions) {
* self.insertions = insertions;
* }
* };
*
*
* 'object:RawTemplate rt = new RawTemplate$Impl$0([name]);
*
* @param rawTemplateLiteral The raw template literal to be desugared.
*/
@Override
public void visit(BLangRawTemplateLiteral rawTemplateLiteral) {
Location pos = rawTemplateLiteral.pos;
BObjectType objType = (BObjectType) rawTemplateLiteral.type;
BLangClassDefinition objClassDef =
desugarTemplateLiteralObjectTypedef(rawTemplateLiteral.strings, objType, pos);
BObjectType classObjType = (BObjectType) objClassDef.type;
BVarSymbol insertionsSym = classObjType.fields.get("insertions").symbol;
BLangListConstructorExpr insertionsList = ASTBuilderUtil.createListConstructorExpr(pos, insertionsSym.type);
insertionsList.exprs.addAll(rawTemplateLiteral.insertions);
insertionsList.expectedType = insertionsSym.type;
BLangTypeInit typeNewExpr = ASTBuilderUtil.createEmptyTypeInit(pos, classObjType);
typeNewExpr.argsExpr.add(insertionsList);
typeNewExpr.initInvocation.argExprs.add(insertionsList);
typeNewExpr.initInvocation.requiredArgs.add(insertionsList);
result = rewriteExpr(typeNewExpr);
}
/**
* This method generates a concrete object class definition for the provided raw template object type as follows.
* A literal defined as 'object:RawTemplate rt = `Hello ${name}!`;
* is desugared to,
* type $anonType$0 object {
* public string[] strings = ["Hello ", "!"];
* public (any|error)[] insertions;
*
* function init((any|error)[] insertions) {
* self.insertions = insertions;
* }
* };
* @param strings The string portions of the literal
* @param objectType The abstract object type for which an object class needs to be generated
* @param pos The diagnostic position info for the type node
* @return The generated concrete object class definition
*/
private BLangClassDefinition desugarTemplateLiteralObjectTypedef(List<BLangLiteral> strings, BObjectType objectType,
Location pos) {
BObjectTypeSymbol tSymbol = (BObjectTypeSymbol) objectType.tsymbol;
Name objectClassName = names.fromString(
anonModelHelper.getNextRawTemplateTypeKey(env.enclPkg.packageID, tSymbol.name));
BObjectTypeSymbol classTSymbol = Symbols.createClassSymbol(tSymbol.flags, objectClassName,
env.enclPkg.packageID, null, env.enclPkg.symbol,
pos, VIRTUAL, false);
classTSymbol.flags |= Flags.CLASS;
BObjectType objectClassType = new BObjectType(classTSymbol, classTSymbol.flags);
objectClassType.fields = objectType.fields;
classTSymbol.type = objectClassType;
BLangClassDefinition classDef = TypeDefBuilderHelper.createClassDef(pos, classTSymbol, env);
classDef.name = ASTBuilderUtil.createIdentifier(pos, objectClassType.tsymbol.name.value);
BType stringsType = objectClassType.fields.get("strings").symbol.type;
BLangListConstructorExpr stringsList = ASTBuilderUtil.createListConstructorExpr(pos, stringsType);
stringsList.exprs.addAll(strings);
stringsList.expectedType = stringsType;
classDef.fields.get(0).expr = stringsList;
BLangFunction userDefinedInitFunction = createUserDefinedObjectInitFn(classDef, env);
classDef.initFunction = userDefinedInitFunction;
env.enclPkg.functions.add(userDefinedInitFunction);
env.enclPkg.topLevelNodes.add(userDefinedInitFunction);
BLangFunction tempGeneratedInitFunction = createGeneratedInitializerFunction(classDef, env);
tempGeneratedInitFunction.clonedEnv = SymbolEnv.createFunctionEnv(tempGeneratedInitFunction,
tempGeneratedInitFunction.symbol.scope, env);
this.semanticAnalyzer.analyzeNode(tempGeneratedInitFunction, env);
classDef.generatedInitFunction = tempGeneratedInitFunction;
env.enclPkg.functions.add(classDef.generatedInitFunction);
env.enclPkg.topLevelNodes.add(classDef.generatedInitFunction);
return rewrite(classDef, env);
}
/**
* Creates a user-defined init() method for the provided class definition node. If there are fields without
* default values specified in the node, this will add parameters for those fields in the init() method and
* assign the param values to the respective fields in the method body.
*
* @param classDefn The class definition node for which the init() method is generated
* @param env The symbol env for the class definition node
* @return The generated init() method
*/
private BLangFunction createUserDefinedObjectInitFn(BLangClassDefinition classDefn, SymbolEnv env) {
BLangFunction initFunction =
TypeDefBuilderHelper.createInitFunctionForStructureType(classDefn.pos, classDefn.symbol, env,
names, Names.USER_DEFINED_INIT_SUFFIX,
symTable, classDefn.type);
BObjectTypeSymbol typeSymbol = ((BObjectTypeSymbol) classDefn.type.tsymbol);
typeSymbol.initializerFunc = new BAttachedFunction(Names.USER_DEFINED_INIT_SUFFIX, initFunction.symbol,
(BInvokableType) initFunction.type, classDefn.pos);
classDefn.initFunction = initFunction;
initFunction.returnTypeNode.type = symTable.nilType;
BLangBlockFunctionBody initFuncBody = (BLangBlockFunctionBody) initFunction.body;
BInvokableType initFnType = (BInvokableType) initFunction.type;
for (BLangSimpleVariable field : classDefn.fields) {
if (field.expr != null) {
continue;
}
BVarSymbol fieldSym = field.symbol;
BVarSymbol paramSym = new BVarSymbol(Flags.FINAL, fieldSym.name, this.env.scope.owner.pkgID, fieldSym.type,
initFunction.symbol, classDefn.pos, VIRTUAL);
BLangSimpleVariable param = ASTBuilderUtil.createVariable(classDefn.pos, fieldSym.name.value,
fieldSym.type, null, paramSym);
param.flagSet.add(Flag.FINAL);
initFunction.symbol.scope.define(paramSym.name, paramSym);
initFunction.symbol.params.add(paramSym);
initFnType.paramTypes.add(param.type);
initFunction.requiredParams.add(param);
BLangSimpleVarRef paramRef = ASTBuilderUtil.createVariableRef(initFunction.pos, paramSym);
BLangAssignment fieldInit = createStructFieldUpdate(initFunction, paramRef, fieldSym, field.type,
initFunction.receiver.symbol, field.name);
initFuncBody.addStatement(fieldInit);
}
return initFunction;
}
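// Illustration of the init() generated above (Ballerina surface syntax, shown for clarity only;
// names are taken from the raw template case): a class with a defaulted `strings` field and a
// non-defaulted `insertions` field ends up with
//
//     function init((any|error)[] insertions) {
//         self.insertions = insertions;   // each non-defaulted field becomes an init param
//     }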
@Override
public void visit(BLangWorkerSend workerSendNode) {
workerSendNode.expr = visitCloneInvocation(rewriteExpr(workerSendNode.expr), workerSendNode.expr.type);
result = workerSendNode;
}
@Override
public void visit(BLangWorkerSyncSendExpr syncSendExpr) {
syncSendExpr.expr = visitCloneInvocation(rewriteExpr(syncSendExpr.expr), syncSendExpr.expr.type);
result = syncSendExpr;
}
@Override
public void visit(BLangWorkerReceive workerReceiveNode) {
result = workerReceiveNode;
}
@Override
public void visit(BLangWorkerFlushExpr workerFlushExpr) {
workerFlushExpr.workerIdentifierList = workerFlushExpr.cachedWorkerSendStmts
.stream().map(send -> send.workerIdentifier).distinct().collect(Collectors.toList());
result = workerFlushExpr;
}
@Override
public void visit(BLangTransactionalExpr transactionalExpr) {
BInvokableSymbol isTransactionalSymbol =
(BInvokableSymbol) transactionDesugar.getInternalTransactionModuleInvokableSymbol(IS_TRANSACTIONAL);
result = ASTBuilderUtil
.createInvocationExprMethod(transactionalExpr.pos, isTransactionalSymbol, Collections.emptyList(),
Collections.emptyList(), symResolver);
}
@Override
public void visit(BLangCommitExpr commitExpr) {
BLangStatementExpression stmtExpr = transactionDesugar.desugar(commitExpr, env);
result = rewriteExpr(stmtExpr);
}
@Override
public void visit(BLangXMLAttributeAccess xmlAttributeAccessExpr) {
xmlAttributeAccessExpr.indexExpr = rewriteExpr(xmlAttributeAccessExpr.indexExpr);
xmlAttributeAccessExpr.expr = rewriteExpr(xmlAttributeAccessExpr.expr);
if (xmlAttributeAccessExpr.indexExpr != null
&& xmlAttributeAccessExpr.indexExpr.getKind() == NodeKind.XML_QNAME) {
((BLangXMLQName) xmlAttributeAccessExpr.indexExpr).isUsedInXML = true;
}
xmlAttributeAccessExpr.desugared = true;
if (xmlAttributeAccessExpr.lhsVar || xmlAttributeAccessExpr.indexExpr != null) {
result = xmlAttributeAccessExpr;
} else {
result = rewriteExpr(xmlAttributeAccessExpr);
}
}
@Override
public void visit(BLangFail failNode) {
if (this.onFailClause != null) {
if (this.onFailClause.bodyContainsFail) {
result = rewriteNestedOnFail(this.onFailClause, failNode);
} else {
BLangStatementExpression expression = createOnFailInvocation(onFailCallFuncDef, onFailClause,
failNode);
failNode.exprStmt = createExpressionStatement(failNode.pos, expression,
onFailClause.statementBlockReturns, env);
result = failNode;
}
} else {
BLangReturn stmt = ASTBuilderUtil.createReturnStmt(failNode.pos, rewrite(failNode.expr, env));
stmt.desugared = true;
result = stmt;
}
}
@Override
public void visit(BLangLocalVarRef localVarRef) {
result = localVarRef;
}
@Override
public void visit(BLangFieldVarRef fieldVarRef) {
result = fieldVarRef;
}
@Override
public void visit(BLangPackageVarRef packageVarRef) {
result = packageVarRef;
}
@Override
public void visit(BLangFunctionVarRef functionVarRef) {
result = functionVarRef;
}
@Override
public void visit(BLangStructFieldAccessExpr fieldAccessExpr) {
result = fieldAccessExpr;
}
@Override
public void visit(BLangStructFunctionVarRef functionVarRef) {
result = functionVarRef;
}
@Override
public void visit(BLangMapAccessExpr mapKeyAccessExpr) {
result = mapKeyAccessExpr;
}
@Override
public void visit(BLangArrayAccessExpr arrayIndexAccessExpr) {
result = arrayIndexAccessExpr;
}
@Override
public void visit(BLangTupleAccessExpr arrayIndexAccessExpr) {
result = arrayIndexAccessExpr;
}
@Override
public void visit(BLangTableAccessExpr tableKeyAccessExpr) {
result = tableKeyAccessExpr;
}
@Override
public void visit(BLangMapLiteral mapLiteral) {
result = mapLiteral;
}
@Override
public void visit(BLangStructLiteral structLiteral) {
result = structLiteral;
}
@Override
public void visit(BLangWaitForAllExpr.BLangWaitLiteral waitLiteral) {
result = waitLiteral;
}
@Override
public void visit(BLangXMLElementAccess xmlElementAccess) {
xmlElementAccess.expr = rewriteExpr(xmlElementAccess.expr);
ArrayList<BLangExpression> filters = expandFilters(xmlElementAccess.filters);
BLangInvocation invocationNode = createLanglibXMLInvocation(xmlElementAccess.pos, XML_INTERNAL_GET_ELEMENTS,
xmlElementAccess.expr, new ArrayList<>(), filters);
result = rewriteExpr(invocationNode);
}
private ArrayList<BLangExpression> expandFilters(List<BLangXMLElementFilter> filters) {
Map<Name, BXMLNSSymbol> nameBXMLNSSymbolMap = symResolver.resolveAllNamespaces(env);
BXMLNSSymbol defaultNSSymbol = nameBXMLNSSymbolMap.get(names.fromString(XMLConstants.DEFAULT_NS_PREFIX));
String defaultNS = defaultNSSymbol != null ? defaultNSSymbol.namespaceURI : null;
ArrayList<BLangExpression> args = new ArrayList<>();
for (BLangXMLElementFilter filter : filters) {
BSymbol nsSymbol = symResolver.lookupSymbolInPrefixSpace(env, names.fromString(filter.namespace));
if (nsSymbol == symTable.notFoundSymbol) {
if (defaultNS != null && !filter.name.equals("*")) {
String expandedName = createExpandedQName(defaultNS, filter.name);
args.add(createStringLiteral(filter.elemNamePos, expandedName));
} else {
args.add(createStringLiteral(filter.elemNamePos, filter.name));
}
} else {
BXMLNSSymbol bxmlnsSymbol = (BXMLNSSymbol) nsSymbol;
String expandedName = createExpandedQName(bxmlnsSymbol.namespaceURI, filter.name);
BLangLiteral stringLiteral = createStringLiteral(filter.elemNamePos, expandedName);
args.add(stringLiteral);
}
}
return args;
}
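// Illustration of the expansion above, assuming createExpandedQName() yields the usual
// `{namespaceURI}localName` form: a filter `ns:foo` with `ns` bound to "http://example.com/ns"
// becomes the argument string "{http://example.com/ns}foo", while an unprefixed `foo` picks up
// the default namespace, if one is declared, and `*` is passed through unexpanded.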
private BLangInvocation createLanglibXMLInvocation(Location pos, String functionName,
BLangExpression invokeOnExpr,
ArrayList<BLangExpression> args,
ArrayList<BLangExpression> restArgs) {
invokeOnExpr = rewriteExpr(invokeOnExpr);
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
invocationNode.pos = pos;
BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode();
name.setLiteral(false);
name.setValue(functionName);
name.pos = pos;
invocationNode.name = name;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.expr = invokeOnExpr;
invocationNode.symbol = symResolver.lookupLangLibMethod(symTable.xmlType, names.fromString(functionName));
ArrayList<BLangExpression> requiredArgs = new ArrayList<>();
requiredArgs.add(invokeOnExpr);
requiredArgs.addAll(args);
invocationNode.requiredArgs = requiredArgs;
invocationNode.restArgs = rewriteExprs(restArgs);
invocationNode.type = ((BInvokableType) invocationNode.symbol.type).getReturnType();
invocationNode.langLibInvocation = true;
return invocationNode;
}
@Override
public void visit(BLangXMLNavigationAccess xmlNavigation) {
xmlNavigation.expr = rewriteExpr(xmlNavigation.expr);
xmlNavigation.childIndex = rewriteExpr(xmlNavigation.childIndex);
ArrayList<BLangExpression> filters = expandFilters(xmlNavigation.filters);
if (xmlNavigation.navAccessType == XMLNavigationAccess.NavAccessType.DESCENDANTS) {
BLangInvocation invocationNode = createLanglibXMLInvocation(xmlNavigation.pos,
XML_INTERNAL_SELECT_DESCENDANTS, xmlNavigation.expr, new ArrayList<>(), filters);
result = rewriteExpr(invocationNode);
} else if (xmlNavigation.navAccessType == XMLNavigationAccess.NavAccessType.CHILDREN) {
BLangInvocation invocationNode = createLanglibXMLInvocation(xmlNavigation.pos, XML_INTERNAL_CHILDREN,
xmlNavigation.expr, new ArrayList<>(), new ArrayList<>());
result = rewriteExpr(invocationNode);
} else {
BLangExpression childIndexExpr;
if (xmlNavigation.childIndex == null) {
childIndexExpr = new BLangLiteral(Long.valueOf(-1), symTable.intType);
} else {
childIndexExpr = xmlNavigation.childIndex;
}
ArrayList<BLangExpression> args = new ArrayList<>();
args.add(rewriteExpr(childIndexExpr));
BLangInvocation invocationNode = createLanglibXMLInvocation(xmlNavigation.pos,
XML_INTERNAL_GET_FILTERED_CHILDREN_FLAT, xmlNavigation.expr, args, filters);
result = rewriteExpr(invocationNode);
}
}
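// The three navigation forms handled above map to internal lang-lib calls, roughly as follows
// (Ballerina surface syntax shown for illustration):
//
//     x/**/<foo>   ->  selectDescendants(x, filters)
//     x/*          ->  children(x)
//     x/<foo>      ->  getFilteredChildrenFlat(x, -1, filters)   // -1 when no index is given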
@Override
public void visit(BLangIsAssignableExpr assignableExpr) {
assignableExpr.lhsExpr = rewriteExpr(assignableExpr.lhsExpr);
result = assignableExpr;
}
@Override
public void visit(BFunctionPointerInvocation fpInvocation) {
result = fpInvocation;
}
@Override
public void visit(BLangTypedescExpr typedescExpr) {
typedescExpr.typeNode = rewrite(typedescExpr.typeNode, env);
result = typedescExpr;
}
@Override
public void visit(BLangIntRangeExpression intRangeExpression) {
if (!intRangeExpression.includeStart) {
intRangeExpression.startExpr = getModifiedIntRangeStartExpr(intRangeExpression.startExpr);
}
if (!intRangeExpression.includeEnd) {
intRangeExpression.endExpr = getModifiedIntRangeEndExpr(intRangeExpression.endExpr);
}
intRangeExpression.startExpr = rewriteExpr(intRangeExpression.startExpr);
intRangeExpression.endExpr = rewriteExpr(intRangeExpression.endExpr);
result = intRangeExpression;
}
@Override
public void visit(BLangRestArgsExpression bLangVarArgsExpression) {
result = rewriteExpr(bLangVarArgsExpression.expr);
}
@Override
public void visit(BLangNamedArgsExpression bLangNamedArgsExpression) {
bLangNamedArgsExpression.expr = rewriteExpr(bLangNamedArgsExpression.expr);
result = bLangNamedArgsExpression.expr;
}
@Override
public void visit(BLangMatchExpression bLangMatchExpression) {
addMatchExprDefaultCase(bLangMatchExpression);
String matchTempResultVarName = GEN_VAR_PREFIX.value + "temp_result";
BLangSimpleVariable tempResultVar =
ASTBuilderUtil.createVariable(bLangMatchExpression.pos, matchTempResultVarName,
bLangMatchExpression.type, null,
new BVarSymbol(0, names.fromString(matchTempResultVarName),
this.env.scope.owner.pkgID, bLangMatchExpression.type,
this.env.scope.owner, bLangMatchExpression.pos, VIRTUAL));
BLangSimpleVariableDef tempResultVarDef =
ASTBuilderUtil.createVariableDef(bLangMatchExpression.pos, tempResultVar);
tempResultVarDef.desugared = true;
BLangBlockStmt stmts = ASTBuilderUtil.createBlockStmt(bLangMatchExpression.pos, Lists.of(tempResultVarDef));
List<BLangMatchTypedBindingPatternClause> patternClauses = new ArrayList<>();
for (int i = 0; i < bLangMatchExpression.patternClauses.size(); i++) {
BLangMatchExprPatternClause pattern = bLangMatchExpression.patternClauses.get(i);
pattern.expr = rewriteExpr(pattern.expr);
BLangVariableReference tempResultVarRef =
ASTBuilderUtil.createVariableRef(bLangMatchExpression.pos, tempResultVar.symbol);
pattern.expr = addConversionExprIfRequired(pattern.expr, tempResultVarRef.type);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(pattern.pos, tempResultVarRef, pattern.expr);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(pattern.pos, Lists.of(assignmentStmt));
patternClauses.add(ASTBuilderUtil.createMatchStatementPattern(pattern.pos, pattern.variable, patternBody));
}
stmts.addStatement(ASTBuilderUtil.createMatchStatement(bLangMatchExpression.pos, bLangMatchExpression.expr,
patternClauses));
BLangVariableReference tempResultVarRef =
ASTBuilderUtil.createVariableRef(bLangMatchExpression.pos, tempResultVar.symbol);
BLangStatementExpression statementExpr = createStatementExpression(stmts, tempResultVarRef);
statementExpr.type = bLangMatchExpression.type;
result = rewriteExpr(statementExpr);
}
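// Sketch of the rewrite above: a match expression becomes a statement expression that assigns
// into a temp var from within a generated match statement and then yields that var:
//
//     T $temp_result$;                 // GEN_VAR_PREFIX + "temp_result"
//     match expr {
//         pattern_i => { $temp_result$ = <pattern_i expr, converted to T>; }
//     }
//     => $temp_result$                 // value of the whole statement expression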
@Override
public void visit(BLangCheckedExpr checkedExpr) {
visitCheckAndCheckPanicExpr(checkedExpr, false);
}
@Override
public void visit(BLangCheckPanickedExpr checkedExpr) {
visitCheckAndCheckPanicExpr(checkedExpr, true);
}
private void visitCheckAndCheckPanicExpr(BLangCheckedExpr checkedExpr, boolean isCheckPanic) {
String checkedExprVarName = GEN_VAR_PREFIX.value;
BLangSimpleVariable checkedExprVar =
ASTBuilderUtil.createVariable(checkedExpr.pos, checkedExprVarName, checkedExpr.type, null,
new BVarSymbol(0, names.fromString(checkedExprVarName),
this.env.scope.owner.pkgID, checkedExpr.type,
this.env.scope.owner, checkedExpr.pos, VIRTUAL));
BLangSimpleVariableDef checkedExprVarDef = ASTBuilderUtil.createVariableDef(checkedExpr.pos, checkedExprVar);
checkedExprVarDef.desugared = true;
BLangMatchTypedBindingPatternClause patternSuccessCase =
getSafeAssignSuccessPattern(checkedExprVar.pos, checkedExprVar.symbol.type, true,
checkedExprVar.symbol, null);
BLangMatchTypedBindingPatternClause patternErrorCase =
getSafeAssignErrorPattern(checkedExpr.pos, this.env.scope.owner, checkedExpr.equivalentErrorTypeList,
isCheckPanic);
BLangMatch matchStmt = ASTBuilderUtil.createMatchStatement(checkedExpr.pos, checkedExpr.expr,
new ArrayList<BLangMatchTypedBindingPatternClause>() {{
add(patternSuccessCase);
add(patternErrorCase);
}});
BLangBlockStmt generatedStmtBlock = ASTBuilderUtil.createBlockStmt(checkedExpr.pos,
new ArrayList<BLangStatement>() {{
add(checkedExprVarDef);
add(matchStmt);
}});
BLangSimpleVarRef tempCheckedExprVarRef = ASTBuilderUtil.createVariableRef(
checkedExpr.pos, checkedExprVar.symbol);
BLangStatementExpression statementExpr = createStatementExpression(
generatedStmtBlock, tempCheckedExprVarRef);
statementExpr.type = checkedExpr.type;
result = rewriteExpr(statementExpr);
}
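// Sketch of the rewrite above: `check expr` / `checkpanic expr` turns into a statement
// expression over a generated match statement, conceptually:
//
//     T $result$;
//     match expr {
//         T $t_match$       => { $result$ = $t_match$; }          // success pattern
//         error $t_failure$ => { /* fail / return / panic */ }    // error pattern
//     }
//     => $result$
//
// where the error arm panics for checkpanic and otherwise fails or returns the error
// (see getSafeAssignErrorPattern below).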
@Override
public void visit(BLangServiceConstructorExpr serviceConstructorExpr) {
final BLangTypeInit typeInit = ASTBuilderUtil.createEmptyTypeInit(serviceConstructorExpr.pos,
serviceConstructorExpr.serviceNode.serviceClass.symbol.type);
serviceConstructorExpr.serviceNode.annAttachments.forEach(attachment -> rewrite(attachment, env));
result = rewriteExpr(typeInit);
}
@Override
public void visit(BLangTypeTestExpr typeTestExpr) {
BLangExpression expr = typeTestExpr.expr;
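// The return value is unused here: addConversionExprIfRequired() sets expr.impConversionExpr
// as a side effect, and rewriteExpr() picks that implicit conversion up before visiting.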
if (types.isValueType(expr.type)) {
addConversionExprIfRequired(expr, symTable.anyType);
}
typeTestExpr.expr = rewriteExpr(expr);
typeTestExpr.typeNode = rewrite(typeTestExpr.typeNode, env);
result = typeTestExpr;
}
@Override
public void visit(BLangAnnotAccessExpr annotAccessExpr) {
BLangBinaryExpr binaryExpr = (BLangBinaryExpr) TreeBuilder.createBinaryExpressionNode();
binaryExpr.pos = annotAccessExpr.pos;
binaryExpr.opKind = OperatorKind.ANNOT_ACCESS;
binaryExpr.lhsExpr = annotAccessExpr.expr;
binaryExpr.rhsExpr = ASTBuilderUtil.createLiteral(annotAccessExpr.pkgAlias.pos, symTable.stringType,
annotAccessExpr.annotationSymbol.bvmAlias());
binaryExpr.type = annotAccessExpr.type;
binaryExpr.opSymbol = new BOperatorSymbol(names.fromString(OperatorKind.ANNOT_ACCESS.value()), null,
new BInvokableType(Lists.of(binaryExpr.lhsExpr.type,
binaryExpr.rhsExpr.type),
annotAccessExpr.type, null), null,
symTable.builtinPos, VIRTUAL);
result = rewriteExpr(binaryExpr);
}
@Override
public void visit(BLangIsLikeExpr isLikeExpr) {
isLikeExpr.expr = rewriteExpr(isLikeExpr.expr);
result = isLikeExpr;
}
@Override
public void visit(BLangStatementExpression bLangStatementExpression) {
bLangStatementExpression.expr = rewriteExpr(bLangStatementExpression.expr);
bLangStatementExpression.stmt = rewrite(bLangStatementExpression.stmt, env);
result = bLangStatementExpression;
}
@Override
public void visit(BLangQueryExpr queryExpr) {
BLangStatementExpression stmtExpr = queryDesugar.desugar(queryExpr, env);
result = rewrite(stmtExpr, env);
}
@Override
public void visit(BLangQueryAction queryAction) {
BLangStatementExpression stmtExpr = queryDesugar.desugar(queryAction, env);
result = rewrite(stmtExpr, env);
}
@Override
public void visit(BLangJSONArrayLiteral jsonArrayLiteral) {
jsonArrayLiteral.exprs = rewriteExprs(jsonArrayLiteral.exprs);
result = jsonArrayLiteral;
}
@Override
public void visit(BLangConstant constant) {
BConstantSymbol constSymbol = constant.symbol;
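// Note: in the TypeTags ordering the simple literal types (int, byte, float, decimal, string,
// boolean) carry the smallest tag values, so `tag <= BOOLEAN` plus the NIL check selects the
// basic literal constants that can be recreated directly from constSymbol.value.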
if (constSymbol.literalType.tag <= TypeTags.BOOLEAN || constSymbol.literalType.tag == TypeTags.NIL) {
if (constSymbol.literalType.tag != TypeTags.NIL && constSymbol.value.value == null) {
throw new IllegalStateException();
}
BLangLiteral literal = ASTBuilderUtil.createLiteral(constant.expr.pos, constSymbol.literalType,
constSymbol.value.value);
constant.expr = rewriteExpr(literal);
} else {
constant.expr = rewriteExpr(constant.expr);
}
constant.annAttachments.forEach(attachment -> rewrite(attachment, env));
result = constant;
}
@Override
public void visit(BLangIgnoreExpr ignoreExpr) {
result = ignoreExpr;
}
@Override
public void visit(BLangDynamicArgExpr dynamicParamExpr) {
dynamicParamExpr.conditionalArgument = rewriteExpr(dynamicParamExpr.conditionalArgument);
dynamicParamExpr.condition = rewriteExpr(dynamicParamExpr.condition);
result = dynamicParamExpr;
}
@Override
public void visit(BLangConstRef constantRef) {
result = ASTBuilderUtil.createLiteral(constantRef.pos, constantRef.type, constantRef.value);
}
BLangSimpleVariableDef getIteratorVariableDefinition(Location pos, BVarSymbol collectionSymbol,
BInvokableSymbol iteratorInvokableSymbol,
boolean isIteratorFuncFromLangLib) {
BLangSimpleVarRef dataReference = ASTBuilderUtil.createVariableRef(pos, collectionSymbol);
BLangInvocation iteratorInvocation = (BLangInvocation) TreeBuilder.createInvocationNode();
iteratorInvocation.pos = pos;
iteratorInvocation.expr = dataReference;
iteratorInvocation.symbol = iteratorInvokableSymbol;
iteratorInvocation.type = iteratorInvokableSymbol.retType;
iteratorInvocation.argExprs = Lists.of(dataReference);
iteratorInvocation.requiredArgs = iteratorInvocation.argExprs;
iteratorInvocation.langLibInvocation = isIteratorFuncFromLangLib;
BVarSymbol iteratorSymbol = new BVarSymbol(0, names.fromString("$iterator$"), this.env.scope.owner.pkgID,
iteratorInvokableSymbol.retType, this.env.scope.owner, pos, VIRTUAL);
BLangSimpleVariable iteratorVariable = ASTBuilderUtil.createVariable(pos, "$iterator$",
iteratorInvokableSymbol.retType, iteratorInvocation, iteratorSymbol);
return ASTBuilderUtil.createVariableDef(pos, iteratorVariable);
}
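// The variable definition built above corresponds, roughly, to the Ballerina statement
//
//     var $iterator$ = collection.iterator();
//
// with the actual invocation symbol supplied by the caller (a lang-lib method when
// isIteratorFuncFromLangLib is true).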
BLangSimpleVariableDef getIteratorNextVariableDefinition(Location pos, BType nillableResultType,
BVarSymbol iteratorSymbol,
BVarSymbol resultSymbol) {
BLangInvocation nextInvocation = createIteratorNextInvocation(pos, iteratorSymbol);
BLangSimpleVariable resultVariable = ASTBuilderUtil.createVariable(pos, "$result$",
nillableResultType, nextInvocation, resultSymbol);
return ASTBuilderUtil.createVariableDef(pos, resultVariable);
}
BLangAssignment getIteratorNextAssignment(Location pos,
BVarSymbol iteratorSymbol, BVarSymbol resultSymbol) {
BLangSimpleVarRef resultReferenceInAssignment = ASTBuilderUtil.createVariableRef(pos, resultSymbol);
BLangInvocation nextInvocation = createIteratorNextInvocation(pos, iteratorSymbol);
nextInvocation.expr.type = types.getSafeType(nextInvocation.expr.type, true, false);
return ASTBuilderUtil.createAssignmentStmt(pos, resultReferenceInAssignment, nextInvocation, false);
}
BLangInvocation createIteratorNextInvocation(Location pos, BVarSymbol iteratorSymbol) {
BLangIdentifier nextIdentifier = ASTBuilderUtil.createIdentifier(pos, "next");
BLangSimpleVarRef iteratorReferenceInNext = ASTBuilderUtil.createVariableRef(pos, iteratorSymbol);
BInvokableSymbol nextFuncSymbol = getNextFunc((BObjectType) iteratorSymbol.type).symbol;
BLangInvocation nextInvocation = (BLangInvocation) TreeBuilder.createInvocationNode();
nextInvocation.pos = pos;
nextInvocation.name = nextIdentifier;
nextInvocation.expr = iteratorReferenceInNext;
nextInvocation.requiredArgs = Lists.of(ASTBuilderUtil.createVariableRef(pos, iteratorSymbol));
nextInvocation.argExprs = nextInvocation.requiredArgs;
nextInvocation.symbol = nextFuncSymbol;
nextInvocation.type = nextFuncSymbol.retType;
return nextInvocation;
}
private BAttachedFunction getNextFunc(BObjectType iteratorType) {
BObjectTypeSymbol iteratorSymbol = (BObjectTypeSymbol) iteratorType.tsymbol;
for (BAttachedFunction bAttachedFunction : iteratorSymbol.attachedFuncs) {
if (bAttachedFunction.funcName.value.equals("next")) {
return bAttachedFunction;
}
}
return null;
}
BLangFieldBasedAccess getValueAccessExpression(Location location, BType varType,
BVarSymbol resultSymbol) {
return getFieldAccessExpression(location, "value", varType, resultSymbol);
}
BLangFieldBasedAccess getFieldAccessExpression(Location pos, String fieldName, BType varType,
BVarSymbol resultSymbol) {
BLangSimpleVarRef resultReferenceInVariableDef = ASTBuilderUtil.createVariableRef(pos, resultSymbol);
BLangIdentifier valueIdentifier = ASTBuilderUtil.createIdentifier(pos, fieldName);
BLangFieldBasedAccess fieldBasedAccessExpression =
ASTBuilderUtil.createFieldAccessExpr(resultReferenceInVariableDef, valueIdentifier);
fieldBasedAccessExpression.pos = pos;
fieldBasedAccessExpression.type = varType;
fieldBasedAccessExpression.originalType = fieldBasedAccessExpression.type;
return fieldBasedAccessExpression;
}
private BlockFunctionBodyNode populateArrowExprBodyBlock(BLangArrowFunction bLangArrowFunction) {
BlockFunctionBodyNode blockNode = TreeBuilder.createBlockFunctionBodyNode();
BLangReturn returnNode = (BLangReturn) TreeBuilder.createReturnNode();
returnNode.pos = bLangArrowFunction.body.expr.pos;
returnNode.setExpression(bLangArrowFunction.body.expr);
blockNode.addStatement(returnNode);
return blockNode;
}
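// For example, the expression body of an arrow function such as `x => x + 1` is wrapped here
// into the equivalent block body `{ return x + 1; }`, so later phases only ever see block
// function bodies.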
private BLangInvocation createInvocationNode(String functionName, List<BLangExpression> args, BType retType) {
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode();
name.setLiteral(false);
name.setValue(functionName);
invocationNode.name = name;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.symbol = symTable.rootScope.lookup(new Name(functionName)).symbol;
invocationNode.type = retType;
invocationNode.requiredArgs = args;
return invocationNode;
}
private BLangInvocation createLangLibInvocationNode(String functionName,
BLangExpression onExpr,
List<BLangExpression> args,
BType retType,
Location pos) {
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
invocationNode.pos = pos;
BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode();
name.setLiteral(false);
name.setValue(functionName);
name.pos = pos;
invocationNode.name = name;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.expr = onExpr;
invocationNode.symbol = symResolver.lookupLangLibMethod(onExpr.type, names.fromString(functionName));
ArrayList<BLangExpression> requiredArgs = new ArrayList<>();
requiredArgs.add(onExpr);
requiredArgs.addAll(args);
invocationNode.requiredArgs = requiredArgs;
invocationNode.type = retType != null ? retType : ((BInvokableSymbol) invocationNode.symbol).retType;
invocationNode.langLibInvocation = true;
return invocationNode;
}
private BLangInvocation createLangLibInvocationNode(String functionName,
List<BLangExpression> args,
BType retType,
Location pos) {
BLangInvocation invocationNode = (BLangInvocation) TreeBuilder.createInvocationNode();
invocationNode.pos = pos;
BLangIdentifier name = (BLangIdentifier) TreeBuilder.createIdentifierNode();
name.setLiteral(false);
name.setValue(functionName);
name.pos = pos;
invocationNode.name = name;
invocationNode.pkgAlias = (BLangIdentifier) TreeBuilder.createIdentifierNode();
invocationNode.symbol = symResolver.lookupMethodInModule(symTable.langInternalModuleSymbol,
names.fromString(functionName), env);
ArrayList<BLangExpression> requiredArgs = new ArrayList<>();
requiredArgs.addAll(args);
invocationNode.requiredArgs = requiredArgs;
invocationNode.type = retType != null ? retType : ((BInvokableSymbol) invocationNode.symbol).retType;
invocationNode.langLibInvocation = true;
return invocationNode;
}
private BLangArrayLiteral createArrayLiteralExprNode() {
BLangArrayLiteral expr = (BLangArrayLiteral) TreeBuilder.createArrayLiteralExpressionNode();
expr.exprs = new ArrayList<>();
expr.type = new BArrayType(symTable.anyType);
return expr;
}
private void visitFunctionPointerInvocation(BLangInvocation iExpr) {
BLangAccessExpression expr;
if (iExpr.expr == null) {
expr = new BLangSimpleVarRef();
} else {
BLangFieldBasedAccess fieldBasedAccess = new BLangFieldBasedAccess();
fieldBasedAccess.expr = iExpr.expr;
fieldBasedAccess.field = iExpr.name;
expr = fieldBasedAccess;
}
expr.symbol = iExpr.symbol;
expr.type = iExpr.symbol.type;
BLangExpression rewritten = rewriteExpr(expr);
result = new BFunctionPointerInvocation(iExpr, rewritten);
}
private BLangExpression visitCloneInvocation(BLangExpression expr, BType lhsType) {
if (types.isValueType(expr.type)) {
return expr;
}
if (expr.type.tag == TypeTags.ERROR) {
return expr;
}
BLangInvocation cloneInvok = createLangLibInvocationNode("clone", expr, new ArrayList<>(), null, expr.pos);
return addConversionExprIfRequired(cloneInvok, lhsType);
}
private BLangExpression visitCloneReadonly(BLangExpression expr, BType lhsType) {
if (types.isValueType(expr.type)) {
return expr;
}
if (expr.type.tag == TypeTags.ERROR) {
return expr;
}
BLangInvocation cloneInvok = createLangLibInvocationNode("cloneReadOnly", expr, new ArrayList<>(), expr.type,
expr.pos);
return addConversionExprIfRequired(cloneInvok, lhsType);
}
@SuppressWarnings("unchecked")
<E extends BLangNode> E rewrite(E node, SymbolEnv env) {
if (node == null) {
return null;
}
if (node.desugared) {
return node;
}
SymbolEnv previousEnv = this.env;
this.env = env;
node.accept(this);
BLangNode resultNode = this.result;
this.result = null;
resultNode.desugared = true;
this.env = previousEnv;
return (E) resultNode;
}
@SuppressWarnings("unchecked")
<E extends BLangExpression> E rewriteExpr(E node) {
if (node == null) {
return null;
}
if (node.desugared) {
return node;
}
BLangExpression expr = node;
if (node.impConversionExpr != null) {
expr = node.impConversionExpr;
node.impConversionExpr = null;
}
expr.accept(this);
BLangNode resultNode = this.result;
this.result = null;
resultNode.desugared = true;
return (E) resultNode;
}
@SuppressWarnings("unchecked")
<E extends BLangStatement> E rewrite(E statement, SymbolEnv env) {
if (statement == null) {
return null;
}
BLangStatementLink link = new BLangStatementLink();
link.parent = currentLink;
currentLink = link;
BLangStatement stmt = (BLangStatement) rewrite((BLangNode) statement, env);
link.statement = stmt;
stmt.statementLink = link;
currentLink = link.parent;
return (E) stmt;
}
private <E extends BLangStatement> List<E> rewriteStmt(List<E> nodeList, SymbolEnv env) {
for (int i = 0; i < nodeList.size(); i++) {
nodeList.set(i, rewrite(nodeList.get(i), env));
}
return nodeList;
}
private <E extends BLangNode> List<E> rewrite(List<E> nodeList, SymbolEnv env) {
for (int i = 0; i < nodeList.size(); i++) {
nodeList.set(i, rewrite(nodeList.get(i), env));
}
return nodeList;
}
private <E extends BLangExpression> List<E> rewriteExprs(List<E> nodeList) {
for (int i = 0; i < nodeList.size(); i++) {
nodeList.set(i, rewriteExpr(nodeList.get(i)));
}
return nodeList;
}
private BLangLiteral createStringLiteral(Location pos, String value) {
BLangLiteral stringLit = new BLangLiteral(value, symTable.stringType);
stringLit.pos = pos;
return stringLit;
}
private BLangLiteral createIntLiteral(long value) {
BLangLiteral literal = (BLangLiteral) TreeBuilder.createLiteralExpression();
literal.value = value;
literal.type = symTable.intType;
return literal;
}
private BLangLiteral createByteLiteral(Location pos, Byte value) {
BLangLiteral byteLiteral = new BLangLiteral(Byte.toUnsignedInt(value), symTable.byteType);
byteLiteral.pos = pos;
return byteLiteral;
}
private BLangExpression createTypeCastExpr(BLangExpression expr, BType targetType) {
BLangTypeConversionExpr conversionExpr = (BLangTypeConversionExpr) TreeBuilder.createTypeConversionNode();
conversionExpr.pos = expr.pos;
conversionExpr.expr = expr;
conversionExpr.type = targetType;
conversionExpr.targetType = targetType;
conversionExpr.internal = true;
return conversionExpr;
}
private BType getElementType(BType type) {
if (type.tag != TypeTags.ARRAY) {
return type;
}
return getElementType(((BArrayType) type).getElementType());
}
private void addReturnIfNotPresent(BLangInvokableNode invokableNode) {
if (Symbols.isNative(invokableNode.symbol) ||
(invokableNode.hasBody() && invokableNode.body.getKind() != NodeKind.BLOCK_FUNCTION_BODY)) {
return;
}
BLangBlockFunctionBody funcBody = (BLangBlockFunctionBody) invokableNode.body;
boolean isNeverOrNilableReturn = invokableNode.symbol.type.getReturnType().tag == TypeTags.NEVER ||
invokableNode.symbol.type.getReturnType().isNullable();
if (invokableNode.workers.size() == 0 && isNeverOrNilableReturn && (funcBody.stmts.size() < 1 ||
funcBody.stmts.get(funcBody.stmts.size() - 1).getKind() != NodeKind.RETURN)) {
Location invPos = invokableNode.pos;
Location returnStmtPos = new BLangDiagnosticLocation(invPos.lineRange().filePath(),
invPos.lineRange().endLine().line(),
invPos.lineRange().endLine().line(),
invPos.lineRange().startLine().offset(),
invPos.lineRange().startLine().offset());
BLangReturn returnStmt = ASTBuilderUtil.createNilReturnStmt(returnStmtPos, symTable.nilType);
funcBody.addStatement(returnStmt);
}
}
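// For example, a function with a nil(able) return type and no trailing return statement, such
// as `function foo() { doWork(); }`, gets a synthetic `return ();` appended at a position
// derived from the function's end location.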
/**
* Reorder the invocation arguments to match the original function signature.
*
* @param iExpr The function invocation expression whose arguments are to be reordered
*/
private void reorderArguments(BLangInvocation iExpr) {
BSymbol symbol = iExpr.symbol;
if (symbol == null || symbol.type.tag != TypeTags.INVOKABLE) {
return;
}
BInvokableSymbol invokableSymbol = (BInvokableSymbol) symbol;
List<BLangExpression> restArgs = iExpr.restArgs;
int originalRequiredArgCount = iExpr.requiredArgs.size();
BLangSimpleVarRef varargRef = null;
BLangBlockStmt blockStmt = null;
BType varargVarType = null;
int restArgCount = restArgs.size();
if (restArgCount > 0 &&
restArgs.get(restArgCount - 1).getKind() == NodeKind.REST_ARGS_EXPR &&
originalRequiredArgCount < invokableSymbol.params.size()) {
BLangExpression expr = ((BLangRestArgsExpression) restArgs.get(restArgCount - 1)).expr;
Location varargExpPos = expr.pos;
varargVarType = expr.type;
String varargVarName = DESUGARED_VARARG_KEY + UNDERSCORE + this.varargCount++;
BVarSymbol varargVarSymbol = new BVarSymbol(0, names.fromString(varargVarName), this.env.scope.owner.pkgID,
varargVarType, this.env.scope.owner, varargExpPos, VIRTUAL);
varargRef = ASTBuilderUtil.createVariableRef(varargExpPos, varargVarSymbol);
BLangSimpleVariable var = createVariable(varargExpPos, varargVarName, varargVarType, expr, varargVarSymbol);
BLangSimpleVariableDef varDef = ASTBuilderUtil.createVariableDef(varargExpPos);
varDef.var = var;
varDef.type = varargVarType;
blockStmt = createBlockStmt(varargExpPos);
blockStmt.stmts.add(varDef);
}
if (!invokableSymbol.params.isEmpty()) {
reorderNamedArgs(iExpr, invokableSymbol, varargRef);
}
if (restArgCount == 0 || restArgs.get(restArgCount - 1).getKind() != NodeKind.REST_ARGS_EXPR) {
if (invokableSymbol.restParam == null) {
return;
}
BLangArrayLiteral arrayLiteral = (BLangArrayLiteral) TreeBuilder.createArrayLiteralExpressionNode();
List<BLangExpression> exprs = new ArrayList<>();
BArrayType arrayType = (BArrayType) invokableSymbol.restParam.type;
BType elemType = arrayType.eType;
for (BLangExpression restArg : restArgs) {
exprs.add(addConversionExprIfRequired(restArg, elemType));
}
arrayLiteral.exprs = exprs;
arrayLiteral.type = arrayType;
if (restArgCount != 0) {
iExpr.restArgs = new ArrayList<>();
}
iExpr.restArgs.add(arrayLiteral);
return;
}
if (restArgCount == 1 && restArgs.get(0).getKind() == NodeKind.REST_ARGS_EXPR) {
if (iExpr.requiredArgs.size() == originalRequiredArgCount) {
return;
}
BLangExpression firstNonRestArg = iExpr.requiredArgs.remove(0);
BLangStatementExpression stmtExpression = createStatementExpression(blockStmt, firstNonRestArg);
stmtExpression.type = firstNonRestArg.type;
iExpr.requiredArgs.add(0, stmtExpression);
if (invokableSymbol.restParam == null) {
restArgs.remove(0);
return;
}
BLangRestArgsExpression restArgsExpression = (BLangRestArgsExpression) restArgs.remove(0);
BArrayType restParamType = (BArrayType) invokableSymbol.restParam.type;
if (restArgsExpression.type.tag == TypeTags.RECORD) {
BLangExpression expr = new BLangIgnoreExpr();
expr.type = restParamType;
restArgs.add(expr);
return;
}
Location pos = restArgsExpression.pos;
BLangArrayLiteral newArrayLiteral = createArrayLiteralExprNode();
newArrayLiteral.type = restParamType;
String name = DESUGARED_VARARG_KEY + UNDERSCORE + this.varargCount++;
BVarSymbol varSymbol = new BVarSymbol(0, names.fromString(name), this.env.scope.owner.pkgID,
restParamType, this.env.scope.owner, pos, VIRTUAL);
BLangSimpleVarRef arrayVarRef = ASTBuilderUtil.createVariableRef(pos, varSymbol);
BLangSimpleVariable var = createVariable(pos, name, restParamType, newArrayLiteral, varSymbol);
BLangSimpleVariableDef varDef = ASTBuilderUtil.createVariableDef(pos);
varDef.var = var;
varDef.type = restParamType;
BLangLiteral startIndex = createIntLiteral(invokableSymbol.params.size() - originalRequiredArgCount);
BLangInvocation lengthInvocation = createLengthInvocation(pos, varargRef);
BLangInvocation intRangeInvocation = replaceWithIntRange(pos, startIndex,
getModifiedIntRangeEndExpr(lengthInvocation));
BLangForeach foreach = (BLangForeach) TreeBuilder.createForeachNode();
foreach.pos = pos;
foreach.collection = intRangeInvocation;
types.setForeachTypedBindingPatternType(foreach);
final BLangSimpleVariable foreachVariable = ASTBuilderUtil.createVariable(pos, "$foreach$i",
foreach.varType);
foreachVariable.symbol = new BVarSymbol(0, names.fromIdNode(foreachVariable.name),
this.env.scope.owner.pkgID, foreachVariable.type,
this.env.scope.owner, pos, VIRTUAL);
BLangSimpleVarRef foreachVarRef = ASTBuilderUtil.createVariableRef(pos, foreachVariable.symbol);
foreach.variableDefinitionNode = ASTBuilderUtil.createVariableDef(pos, foreachVariable);
foreach.isDeclaredWithVar = true;
BLangBlockStmt foreachBody = ASTBuilderUtil.createBlockStmt(pos);
BLangIndexBasedAccess valueExpr = ASTBuilderUtil.createIndexAccessExpr(varargRef, foreachVarRef);
valueExpr.type = varargVarType.tag == TypeTags.ARRAY ? ((BArrayType) varargVarType).eType :
symTable.anyType;
BLangExpression pushExpr = addConversionExprIfRequired(valueExpr, restParamType.eType);
BLangExpressionStmt expressionStmt = createExpressionStmt(pos, foreachBody);
BLangInvocation pushInvocation = createLangLibInvocationNode(PUSH_LANGLIB_METHOD, arrayVarRef,
new ArrayList<BLangExpression>() {{
add(pushExpr);
}}, restParamType, pos);
pushInvocation.restArgs.add(pushInvocation.requiredArgs.remove(1));
expressionStmt.expr = pushInvocation;
foreach.body = foreachBody;
BLangBlockStmt newArrayBlockStmt = createBlockStmt(pos);
newArrayBlockStmt.addStatement(varDef);
newArrayBlockStmt.addStatement(foreach);
BLangStatementExpression newArrayStmtExpression = createStatementExpression(newArrayBlockStmt, arrayVarRef);
newArrayStmtExpression.type = restParamType;
restArgs.add(addConversionExprIfRequired(newArrayStmtExpression, restParamType));
return;
}
BArrayType restParamType = (BArrayType) invokableSymbol.restParam.type;
BLangArrayLiteral arrayLiteral = (BLangArrayLiteral) TreeBuilder.createArrayLiteralExpressionNode();
arrayLiteral.type = restParamType;
BType elemType = restParamType.eType;
Location pos = restArgs.get(0).pos;
List<BLangExpression> exprs = new ArrayList<>();
for (int i = 0; i < restArgCount - 1; i++) {
exprs.add(addConversionExprIfRequired(restArgs.get(i), elemType));
}
arrayLiteral.exprs = exprs;
BLangRestArgsExpression pushRestArgsExpr = (BLangRestArgsExpression) TreeBuilder.createVarArgsNode();
pushRestArgsExpr.pos = pos;
pushRestArgsExpr.expr = restArgs.remove(restArgCount - 1);
String name = DESUGARED_VARARG_KEY + UNDERSCORE + this.varargCount++;
BVarSymbol varSymbol = new BVarSymbol(0, names.fromString(name), this.env.scope.owner.pkgID, restParamType,
this.env.scope.owner, pos, VIRTUAL);
BLangSimpleVarRef arrayVarRef = ASTBuilderUtil.createVariableRef(pos, varSymbol);
BLangSimpleVariable var = createVariable(pos, name, restParamType, arrayLiteral, varSymbol);
BLangSimpleVariableDef varDef = ASTBuilderUtil.createVariableDef(pos);
varDef.var = var;
varDef.type = restParamType;
BLangBlockStmt pushBlockStmt = createBlockStmt(pos);
pushBlockStmt.stmts.add(varDef);
BLangExpressionStmt expressionStmt = createExpressionStmt(pos, pushBlockStmt);
BLangInvocation pushInvocation = createLangLibInvocationNode(PUSH_LANGLIB_METHOD, arrayVarRef,
new ArrayList<BLangExpression>() {{
add(pushRestArgsExpr);
}}, restParamType, pos);
pushInvocation.restArgs.add(pushInvocation.requiredArgs.remove(1));
expressionStmt.expr = pushInvocation;
BLangStatementExpression stmtExpression = createStatementExpression(pushBlockStmt, arrayVarRef);
stmtExpression.type = restParamType;
iExpr.restArgs = new ArrayList<BLangExpression>(1) {{ add(stmtExpression); }};
}
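// A sketch of the vararg handling above (names illustrative): given
//
//     function foo(int a, int b, int... rest)
//
// a call `foo(...vals)` first binds the vararg to a generated temp var, fills the required
// params from it by member access ($vararg$[0], $vararg$[1]), and then rebuilds the rest array
// by pushing the remaining elements inside a generated foreach over the leftover index range.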
private void reorderNamedArgs(BLangInvocation iExpr, BInvokableSymbol invokableSymbol, BLangExpression varargRef) {
List<BLangExpression> args = new ArrayList<>();
Map<String, BLangExpression> namedArgs = new HashMap<>();
iExpr.requiredArgs.stream()
.filter(expr -> expr.getKind() == NodeKind.NAMED_ARGS_EXPR)
.forEach(expr -> namedArgs.put(((NamedArgNode) expr).getName().value, expr));
List<BVarSymbol> params = invokableSymbol.params;
List<BLangRecordLiteral> incRecordLiterals = new ArrayList<>();
BLangRecordLiteral incRecordParamAllowAdditionalFields = null;
int varargIndex = 0;
BType varargType = null;
boolean tupleTypedVararg = false;
if (varargRef != null) {
varargType = varargRef.type;
tupleTypedVararg = varargType.tag == TypeTags.TUPLE;
}
for (int i = 0; i < params.size(); i++) {
BVarSymbol param = params.get(i);
if (iExpr.requiredArgs.size() > i && iExpr.requiredArgs.get(i).getKind() != NodeKind.NAMED_ARGS_EXPR) {
args.add(iExpr.requiredArgs.get(i));
} else if (namedArgs.containsKey(param.name.value)) {
args.add(namedArgs.remove(param.name.value));
} else if (param.getFlags().contains(Flag.INCLUDED)) {
BLangRecordLiteral recordLiteral = (BLangRecordLiteral) TreeBuilder.createRecordLiteralNode();
BType paramType = param.type;
recordLiteral.type = paramType;
args.add(recordLiteral);
incRecordLiterals.add(recordLiteral);
if (((BRecordType) paramType).restFieldType != symTable.noType) {
incRecordParamAllowAdditionalFields = recordLiteral;
}
} else if (varargRef == null) {
BLangExpression expr = new BLangIgnoreExpr();
expr.type = param.type;
args.add(expr);
} else {
if (varargRef.type.tag == TypeTags.RECORD) {
if (param.defaultableParam) {
BLangInvocation hasKeyInvocation = createLangLibInvocationNode(HAS_KEY, varargRef,
List.of(createStringLiteral(param.pos, param.name.value)), null, varargRef.pos);
BLangExpression indexExpr = rewriteExpr(createStringLiteral(param.pos, param.name.value));
BLangIndexBasedAccess memberAccessExpr =
ASTBuilderUtil.createMemberAccessExprNode(param.type, varargRef, indexExpr);
BLangExpression ignoreExpr = ASTBuilderUtil.createIgnoreExprNode(param.type);
BLangTernaryExpr ternaryExpr = ASTBuilderUtil.createTernaryExprNode(param.type,
hasKeyInvocation, memberAccessExpr, ignoreExpr);
args.add(ASTBuilderUtil.createDynamicParamExpression(hasKeyInvocation, ternaryExpr));
} else {
BLangFieldBasedAccess fieldBasedAccessExpression =
ASTBuilderUtil.createFieldAccessExpr((BLangAccessibleExpression) varargRef,
ASTBuilderUtil.createIdentifier(param.pos, param.name.value));
fieldBasedAccessExpression.type = param.type;
args.add(fieldBasedAccessExpression);
}
} else {
BLangExpression indexExpr = rewriteExpr(createIntLiteral(varargIndex));
BType memberAccessExprType = tupleTypedVararg ?
((BTupleType) varargType).tupleTypes.get(varargIndex) : ((BArrayType) varargType).eType;
args.add(addConversionExprIfRequired(ASTBuilderUtil.createMemberAccessExprNode(memberAccessExprType,
varargRef, indexExpr), param.type));
varargIndex++;
}
}
}
if (namedArgs.size() > 0) {
setFieldsForIncRecordLiterals(namedArgs, incRecordLiterals, incRecordParamAllowAdditionalFields);
}
iExpr.requiredArgs = args;
}
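// For example, for `function foo(int a, int b)` a call written as `foo(b = 2, a = 1)` leaves
// this method with the positional arg list [1, 2]; params supplied neither positionally nor by
// name are filled from included-record params, the bound vararg, or ignore expressions.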
private void setFieldsForIncRecordLiterals(Map<String, BLangExpression> namedArgs,
List<BLangRecordLiteral> incRecordLiterals,
BLangRecordLiteral incRecordParamAllowAdditionalFields) {
for (String name : namedArgs.keySet()) {
boolean isAdditionalField = true;
BLangNamedArgsExpression expr = (BLangNamedArgsExpression) namedArgs.get(name);
for (BLangRecordLiteral recordLiteral : incRecordLiterals) {
LinkedHashMap<String, BField> fields = ((BRecordType) recordLiteral.type).fields;
if (fields.containsKey(name) && fields.get(name).type.tag != TypeTags.NEVER) {
isAdditionalField = false;
createAndAddRecordFieldForIncRecordLiteral(recordLiteral, expr);
break;
}
}
if (isAdditionalField) {
createAndAddRecordFieldForIncRecordLiteral(incRecordParamAllowAdditionalFields, expr);
}
}
}
private void createAndAddRecordFieldForIncRecordLiteral(BLangRecordLiteral recordLiteral,
BLangNamedArgsExpression expr) {
BLangSimpleVarRef varRef = new BLangSimpleVarRef();
varRef.variableName = expr.name;
BLangRecordLiteral.BLangRecordKeyValueField recordKeyValueField = ASTBuilderUtil.
createBLangRecordKeyValue(varRef, expr.expr);
recordLiteral.fields.add(recordKeyValueField);
}
private BLangMatchTypedBindingPatternClause getSafeAssignErrorPattern(Location location,
BSymbol invokableSymbol,
List<BType> equivalentErrorTypes,
boolean isCheckPanicExpr) {
BType enclosingFuncReturnType = ((BInvokableType) invokableSymbol.type).retType;
Set<BType> returnTypeSet = enclosingFuncReturnType.tag == TypeTags.UNION ?
((BUnionType) enclosingFuncReturnType).getMemberTypes() :
new LinkedHashSet<BType>() {{
add(enclosingFuncReturnType);
}};
boolean returnOnError = equivalentErrorTypes.stream()
.allMatch(errorType -> returnTypeSet.stream()
.anyMatch(retType -> types.isAssignable(errorType, retType)));
String patternFailureCaseVarName = GEN_VAR_PREFIX.value + "t_failure";
BLangSimpleVariable patternFailureCaseVar =
ASTBuilderUtil.createVariable(location, patternFailureCaseVarName, symTable.errorType, null,
new BVarSymbol(0, names.fromString(patternFailureCaseVarName),
this.env.scope.owner.pkgID, symTable.errorType,
this.env.scope.owner, location, VIRTUAL));
BLangVariableReference patternFailureCaseVarRef =
ASTBuilderUtil.createVariableRef(location, patternFailureCaseVar.symbol);
BLangBlockStmt patternBlockFailureCase = (BLangBlockStmt) TreeBuilder.createBlockNode();
patternBlockFailureCase.pos = location;
if (!isCheckPanicExpr && (returnOnError || this.onFailClause != null)) {
BLangFail failStmt = (BLangFail) TreeBuilder.createFailNode();
failStmt.pos = location;
failStmt.expr = patternFailureCaseVarRef;
patternBlockFailureCase.stmts.add(failStmt);
if (returnOnError && this.shouldReturnErrors) {
BLangReturn errorReturn = ASTBuilderUtil.createReturnStmt(location,
rewrite(patternFailureCaseVarRef, env));
errorReturn.desugared = true;
failStmt.exprStmt = errorReturn;
}
} else {
BLangPanic panicNode = (BLangPanic) TreeBuilder.createPanicNode();
panicNode.pos = location;
panicNode.expr = patternFailureCaseVarRef;
patternBlockFailureCase.stmts.add(panicNode);
}
return ASTBuilderUtil.createMatchStatementPattern(location, patternFailureCaseVar, patternBlockFailureCase);
}
private BLangMatchTypedBindingPatternClause getSafeAssignSuccessPattern(Location location,
BType lhsType, boolean isVarDef, BVarSymbol varSymbol, BLangExpression lhsExpr) {
String patternSuccessCaseVarName = GEN_VAR_PREFIX.value + "t_match";
BLangSimpleVariable patternSuccessCaseVar =
ASTBuilderUtil.createVariable(location, patternSuccessCaseVarName, lhsType, null,
new BVarSymbol(0, names.fromString(patternSuccessCaseVarName),
this.env.scope.owner.pkgID, lhsType,
this.env.scope.owner, location, VIRTUAL));
BLangExpression varRefExpr;
if (isVarDef) {
varRefExpr = ASTBuilderUtil.createVariableRef(location, varSymbol);
} else {
varRefExpr = lhsExpr;
}
BLangVariableReference patternSuccessCaseVarRef = ASTBuilderUtil.createVariableRef(location,
patternSuccessCaseVar.symbol);
BLangAssignment assignmentStmtSuccessCase = ASTBuilderUtil.createAssignmentStmt(location,
varRefExpr, patternSuccessCaseVarRef, false);
BLangBlockStmt patternBlockSuccessCase = ASTBuilderUtil.createBlockStmt(location,
new ArrayList<BLangStatement>() {{
add(assignmentStmtSuccessCase);
}});
return ASTBuilderUtil.createMatchStatementPattern(location,
patternSuccessCaseVar, patternBlockSuccessCase);
}
private BLangStatement generateIfElseStmt(BLangMatch matchStmt, BLangSimpleVariable matchExprVar) {
List<BLangMatchBindingPatternClause> patterns = matchStmt.patternClauses;
BLangIf parentIfNode = generateIfElseStmt(patterns.get(0), matchExprVar);
BLangIf currentIfNode = parentIfNode;
for (int i = 1; i < patterns.size(); i++) {
BLangMatchBindingPatternClause patternClause = patterns.get(i);
if (i == patterns.size() - 1 && patternClause.isLastPattern) {
currentIfNode.elseStmt = getMatchPatternElseBody(patternClause, matchExprVar);
} else {
currentIfNode.elseStmt = generateIfElseStmt(patternClause, matchExprVar);
currentIfNode = (BLangIf) currentIfNode.elseStmt;
}
}
return parentIfNode;
}
/**
* Generate an if statement for the given match binding pattern clause.
*
* @param pattern match binding pattern clause node
* @param matchExprVar variable node of the match expression
* @return if statement node (the else branch is attached by the caller)
*/
private BLangIf generateIfElseStmt(BLangMatchBindingPatternClause pattern, BLangSimpleVariable matchExprVar) {
BLangExpression ifCondition = createPatternIfCondition(pattern, matchExprVar.symbol);
if (NodeKind.MATCH_TYPED_PATTERN_CLAUSE == pattern.getKind()) {
BLangBlockStmt patternBody = getMatchPatternBody(pattern, matchExprVar);
return ASTBuilderUtil.createIfElseStmt(pattern.pos, ifCondition, patternBody, null);
}
BType expectedType = matchExprVar.type;
if (pattern.getKind() == NodeKind.MATCH_STRUCTURED_PATTERN_CLAUSE) {
BLangMatchStructuredBindingPatternClause matchPattern = (BLangMatchStructuredBindingPatternClause) pattern;
expectedType = getStructuredBindingPatternType(matchPattern.bindingPatternVariable);
}
if (NodeKind.MATCH_STRUCTURED_PATTERN_CLAUSE == pattern.getKind()) {
BLangMatchStructuredBindingPatternClause structuredPattern =
(BLangMatchStructuredBindingPatternClause) pattern;
BLangSimpleVariableDef varDef = forceCastIfApplicable(matchExprVar.symbol, pattern.pos, expectedType);
BLangSimpleVarRef matchExprVarRef = ASTBuilderUtil.createVariableRef(pattern.pos, varDef.var.symbol);
structuredPattern.bindingPatternVariable.expr = matchExprVarRef;
BLangStatement varDefStmt;
if (NodeKind.TUPLE_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createTupleVariableDef(pattern.pos,
(BLangTupleVariable) structuredPattern.bindingPatternVariable);
} else if (NodeKind.RECORD_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createRecordVariableDef(pattern.pos,
(BLangRecordVariable) structuredPattern.bindingPatternVariable);
} else if (NodeKind.ERROR_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createErrorVariableDef(pattern.pos,
(BLangErrorVariable) structuredPattern.bindingPatternVariable);
} else {
varDefStmt = ASTBuilderUtil
.createVariableDef(pattern.pos, (BLangSimpleVariable) structuredPattern.bindingPatternVariable);
}
if (structuredPattern.typeGuardExpr != null) {
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(structuredPattern.pos);
blockStmt.addStatement(varDef);
blockStmt.addStatement(varDefStmt);
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt,
structuredPattern.typeGuardExpr);
stmtExpr.type = symTable.booleanType;
ifCondition = ASTBuilderUtil
.createBinaryExpr(pattern.pos, ifCondition, stmtExpr, symTable.booleanType, OperatorKind.AND,
(BOperatorSymbol) symResolver
.resolveBinaryOperator(OperatorKind.AND, symTable.booleanType,
symTable.booleanType));
} else {
structuredPattern.body.stmts.add(0, varDef);
structuredPattern.body.stmts.add(1, varDefStmt);
}
}
return ASTBuilderUtil.createIfElseStmt(pattern.pos, ifCondition, pattern.body, null);
}
private BLangBlockStmt getMatchPatternBody(BLangMatchBindingPatternClause pattern,
BLangSimpleVariable matchExprVar) {
BLangBlockStmt body;
BLangMatchTypedBindingPatternClause patternClause = (BLangMatchTypedBindingPatternClause) pattern;
if (patternClause.variable.name.value.equals(Names.IGNORE.value)) {
return patternClause.body;
}
BLangSimpleVarRef matchExprVarRef = ASTBuilderUtil.createVariableRef(patternClause.pos,
matchExprVar.symbol);
BLangExpression patternVarExpr = addConversionExprIfRequired(matchExprVarRef, patternClause.variable.type);
BLangSimpleVariable patternVar = ASTBuilderUtil.createVariable(patternClause.pos, "",
patternClause.variable.type, patternVarExpr, patternClause.variable.symbol);
BLangSimpleVariableDef patternVarDef = ASTBuilderUtil.createVariableDef(patternVar.pos, patternVar);
patternClause.body.stmts.add(0, patternVarDef);
body = patternClause.body;
return body;
}
private BLangBlockStmt getMatchPatternElseBody(BLangMatchBindingPatternClause pattern,
BLangSimpleVariable matchExprVar) {
BLangBlockStmt body = pattern.body;
if (NodeKind.MATCH_STRUCTURED_PATTERN_CLAUSE == pattern.getKind()) {
BLangSimpleVarRef matchExprVarRef = ASTBuilderUtil.createVariableRef(pattern.pos, matchExprVar.symbol);
BLangMatchStructuredBindingPatternClause structuredPattern =
(BLangMatchStructuredBindingPatternClause) pattern;
structuredPattern.bindingPatternVariable.expr = matchExprVarRef;
BLangStatement varDefStmt;
if (NodeKind.TUPLE_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createTupleVariableDef(pattern.pos,
(BLangTupleVariable) structuredPattern.bindingPatternVariable);
} else if (NodeKind.RECORD_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createRecordVariableDef(pattern.pos,
(BLangRecordVariable) structuredPattern.bindingPatternVariable);
} else if (NodeKind.ERROR_VARIABLE == structuredPattern.bindingPatternVariable.getKind()) {
varDefStmt = ASTBuilderUtil.createErrorVariableDef(pattern.pos,
(BLangErrorVariable) structuredPattern.bindingPatternVariable);
} else {
varDefStmt = ASTBuilderUtil
.createVariableDef(pattern.pos, (BLangSimpleVariable) structuredPattern.bindingPatternVariable);
}
structuredPattern.body.stmts.add(0, varDefStmt);
body = structuredPattern.body;
}
return body;
}
BLangExpression addConversionExprIfRequired(BLangExpression expr, BType lhsType) {
if (lhsType.tag == TypeTags.NONE) {
return expr;
}
BType rhsType = expr.type;
if (types.isSameType(rhsType, lhsType)) {
return expr;
}
types.setImplicitCastExpr(expr, rhsType, lhsType);
if (expr.impConversionExpr != null) {
return expr;
}
if (lhsType.tag == TypeTags.JSON && rhsType.tag == TypeTags.NIL) {
return expr;
}
if (lhsType.tag == TypeTags.NIL && rhsType.isNullable()) {
return expr;
}
if (lhsType.tag == TypeTags.ARRAY && rhsType.tag == TypeTags.TUPLE) {
return expr;
}
BLangTypeConversionExpr conversionExpr = (BLangTypeConversionExpr)
TreeBuilder.createTypeConversionNode();
conversionExpr.expr = expr;
conversionExpr.targetType = lhsType;
conversionExpr.type = lhsType;
conversionExpr.pos = expr.pos;
conversionExpr.checkTypes = false;
conversionExpr.internal = true;
return conversionExpr;
}
private BLangExpression createPatternIfCondition(BLangMatchBindingPatternClause patternClause,
BVarSymbol varSymbol) {
BType patternType;
switch (patternClause.getKind()) {
case MATCH_STATIC_PATTERN_CLAUSE:
BLangMatchStaticBindingPatternClause staticPattern =
(BLangMatchStaticBindingPatternClause) patternClause;
patternType = staticPattern.literal.type;
break;
case MATCH_STRUCTURED_PATTERN_CLAUSE:
BLangMatchStructuredBindingPatternClause structuredPattern =
(BLangMatchStructuredBindingPatternClause) patternClause;
patternType = getStructuredBindingPatternType(structuredPattern.bindingPatternVariable);
break;
default:
BLangMatchTypedBindingPatternClause simplePattern = (BLangMatchTypedBindingPatternClause) patternClause;
patternType = simplePattern.variable.type;
break;
}
BLangExpression binaryExpr;
BType[] memberTypes;
if (patternType.tag == TypeTags.UNION) {
BUnionType unionType = (BUnionType) patternType;
memberTypes = unionType.getMemberTypes().toArray(new BType[0]);
} else {
memberTypes = new BType[1];
memberTypes[0] = patternType;
}
if (memberTypes.length == 1) {
binaryExpr = createPatternMatchBinaryExpr(patternClause, varSymbol, memberTypes[0]);
} else {
BLangExpression lhsExpr = createPatternMatchBinaryExpr(patternClause, varSymbol, memberTypes[0]);
BLangExpression rhsExpr = createPatternMatchBinaryExpr(patternClause, varSymbol, memberTypes[1]);
binaryExpr = ASTBuilderUtil.createBinaryExpr(patternClause.pos, lhsExpr, rhsExpr,
symTable.booleanType, OperatorKind.OR,
(BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.OR,
lhsExpr.type, rhsExpr.type));
for (int i = 2; i < memberTypes.length; i++) {
lhsExpr = createPatternMatchBinaryExpr(patternClause, varSymbol, memberTypes[i]);
rhsExpr = binaryExpr;
binaryExpr = ASTBuilderUtil.createBinaryExpr(patternClause.pos, lhsExpr, rhsExpr,
symTable.booleanType, OperatorKind.OR,
(BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.OR,
lhsExpr.type, rhsExpr.type));
}
}
return binaryExpr;
}
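// For a union-typed pattern such as `int|string`, the condition built above is the disjunction
// of the per-member-type checks (one createPatternMatchBinaryExpr() result per member type),
// OR-ed together pairwise over the member types.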
private BType getStructuredBindingPatternType(BLangVariable bindingPatternVariable) {
if (NodeKind.TUPLE_VARIABLE == bindingPatternVariable.getKind()) {
BLangTupleVariable tupleVariable = (BLangTupleVariable) bindingPatternVariable;
List<BType> memberTypes = new ArrayList<>();
for (int i = 0; i < tupleVariable.memberVariables.size(); i++) {
memberTypes.add(getStructuredBindingPatternType(tupleVariable.memberVariables.get(i)));
}
BTupleType tupleType = new BTupleType(memberTypes);
if (tupleVariable.restVariable != null) {
BArrayType restArrayType = (BArrayType) getStructuredBindingPatternType(tupleVariable.restVariable);
tupleType.restType = restArrayType.eType;
}
return tupleType;
}
if (NodeKind.RECORD_VARIABLE == bindingPatternVariable.getKind()) {
BLangRecordVariable recordVariable = (BLangRecordVariable) bindingPatternVariable;
BRecordTypeSymbol recordSymbol =
Symbols.createRecordSymbol(0, names.fromString("$anonRecordType$" + UNDERSCORE + recordCount++),
env.enclPkg.symbol.pkgID, null, env.scope.owner, recordVariable.pos,
VIRTUAL);
recordSymbol.initializerFunc = createRecordInitFunc();
recordSymbol.scope = new Scope(recordSymbol);
recordSymbol.scope.define(
names.fromString(recordSymbol.name.value + "." + recordSymbol.initializerFunc.funcName.value),
recordSymbol.initializerFunc.symbol);
LinkedHashMap<String, BField> fields = new LinkedHashMap<>();
List<BLangSimpleVariable> typeDefFields = new ArrayList<>();
for (int i = 0; i < recordVariable.variableList.size(); i++) {
String fieldNameStr = recordVariable.variableList.get(i).key.value;
Name fieldName = names.fromString(fieldNameStr);
BType fieldType = getStructuredBindingPatternType(
recordVariable.variableList.get(i).valueBindingPattern);
BVarSymbol fieldSymbol = new BVarSymbol(Flags.REQUIRED, fieldName, env.enclPkg.symbol.pkgID, fieldType,
recordSymbol, bindingPatternVariable.pos, VIRTUAL);
fields.put(fieldName.value, new BField(fieldName, bindingPatternVariable.pos, fieldSymbol));
typeDefFields.add(ASTBuilderUtil.createVariable(null, fieldNameStr, fieldType, null, fieldSymbol));
recordSymbol.scope.define(fieldName, fieldSymbol);
}
BRecordType recordVarType = new BRecordType(recordSymbol);
recordVarType.fields = fields;
recordVarType.restFieldType = recordVariable.restParam != null ?
((BMapType) ((BLangSimpleVariable) recordVariable.restParam).type).constraint :
symTable.anydataType;
recordSymbol.type = recordVarType;
recordVarType.tsymbol = recordSymbol;
BLangRecordTypeNode recordTypeNode = TypeDefBuilderHelper.createRecordTypeNode(typeDefFields,
recordVarType,
bindingPatternVariable.pos);
recordTypeNode.initFunction =
rewrite(TypeDefBuilderHelper.createInitFunctionForRecordType(recordTypeNode, env, names, symTable),
env);
TypeDefBuilderHelper.addTypeDefinition(recordVarType, recordSymbol, recordTypeNode, env);
return recordVarType;
}
if (NodeKind.ERROR_VARIABLE == bindingPatternVariable.getKind()) {
BLangErrorVariable errorVariable = (BLangErrorVariable) bindingPatternVariable;
BErrorTypeSymbol errorTypeSymbol = new BErrorTypeSymbol(
SymTag.ERROR,
Flags.PUBLIC,
names.fromString("$anonErrorType$" + UNDERSCORE + errorCount++),
env.enclPkg.symbol.pkgID,
null, null, errorVariable.pos, VIRTUAL);
BType detailType;
if ((errorVariable.detail == null || errorVariable.detail.isEmpty()) && errorVariable.restDetail != null) {
detailType = symTable.detailType;
} else {
detailType = createDetailType(errorVariable.detail, errorVariable.restDetail, errorCount++,
errorVariable.pos);
BLangRecordTypeNode recordTypeNode = createRecordTypeNode(errorVariable, (BRecordType) detailType);
recordTypeNode.initFunction = TypeDefBuilderHelper
.createInitFunctionForRecordType(recordTypeNode, env, names, symTable);
TypeDefBuilderHelper.addTypeDefinition(detailType, detailType.tsymbol, recordTypeNode, env);
}
BErrorType errorType = new BErrorType(errorTypeSymbol, detailType);
errorTypeSymbol.type = errorType;
TypeDefBuilderHelper.addTypeDefinition(errorType, errorTypeSymbol, createErrorTypeNode(errorType), env);
return errorType;
}
return bindingPatternVariable.type;
}
private BLangRecordTypeNode createRecordTypeNode(BLangErrorVariable errorVariable, BRecordType detailType) {
List<BLangSimpleVariable> fieldList = new ArrayList<>();
for (BLangErrorVariable.BLangErrorDetailEntry field : errorVariable.detail) {
BVarSymbol symbol = field.valueBindingPattern.symbol;
if (symbol == null) {
symbol = new BVarSymbol(Flags.PUBLIC, names.fromString(field.key.value + "$"),
this.env.enclPkg.packageID, symTable.pureType, null,
field.valueBindingPattern.pos, VIRTUAL);
}
BLangSimpleVariable fieldVar = ASTBuilderUtil.createVariable(
field.valueBindingPattern.pos,
symbol.name.value,
field.valueBindingPattern.type,
field.valueBindingPattern.expr,
symbol);
fieldList.add(fieldVar);
}
return TypeDefBuilderHelper.createRecordTypeNode(fieldList, detailType, errorVariable.pos);
}
private BType createDetailType(List<BLangErrorVariable.BLangErrorDetailEntry> detail,
BLangSimpleVariable restDetail, int errorNo, Location pos) {
BRecordTypeSymbol detailRecordTypeSymbol = new BRecordTypeSymbol(
SymTag.RECORD,
Flags.PUBLIC,
names.fromString("$anonErrorType$" + UNDERSCORE + errorNo + "$detailType"),
env.enclPkg.symbol.pkgID, null, null, pos, VIRTUAL);
detailRecordTypeSymbol.initializerFunc = createRecordInitFunc();
detailRecordTypeSymbol.scope = new Scope(detailRecordTypeSymbol);
detailRecordTypeSymbol.scope.define(
names.fromString(detailRecordTypeSymbol.name.value + "." +
detailRecordTypeSymbol.initializerFunc.funcName.value),
detailRecordTypeSymbol.initializerFunc.symbol);
BRecordType detailRecordType = new BRecordType(detailRecordTypeSymbol);
detailRecordType.restFieldType = symTable.anydataType;
if (restDetail == null) {
detailRecordType.sealed = true;
}
for (BLangErrorVariable.BLangErrorDetailEntry detailEntry : detail) {
Name fieldName = names.fromIdNode(detailEntry.key);
BType fieldType = getStructuredBindingPatternType(detailEntry.valueBindingPattern);
BVarSymbol fieldSym = new BVarSymbol(Flags.PUBLIC, fieldName, detailRecordTypeSymbol.pkgID, fieldType,
detailRecordTypeSymbol, detailEntry.key.pos, VIRTUAL);
detailRecordType.fields.put(fieldName.value, new BField(fieldName, detailEntry.key.pos, fieldSym));
detailRecordTypeSymbol.scope.define(fieldName, fieldSym);
}
return detailRecordType;
}
private BAttachedFunction createRecordInitFunc() {
BInvokableType bInvokableType = new BInvokableType(new ArrayList<>(), symTable.nilType, null);
BInvokableSymbol initFuncSymbol = Symbols.createFunctionSymbol(
Flags.PUBLIC, Names.EMPTY, env.enclPkg.symbol.pkgID, bInvokableType, env.scope.owner, false,
symTable.builtinPos, VIRTUAL);
initFuncSymbol.retType = symTable.nilType;
return new BAttachedFunction(Names.INIT_FUNCTION_SUFFIX, initFuncSymbol, bInvokableType, symTable.builtinPos);
}
BLangErrorType createErrorTypeNode(BErrorType errorType) {
BLangErrorType errorTypeNode = (BLangErrorType) TreeBuilder.createErrorTypeNode();
errorTypeNode.type = errorType;
return errorTypeNode;
}
private BLangExpression createPatternMatchBinaryExpr(BLangMatchBindingPatternClause patternClause,
BVarSymbol varSymbol, BType patternType) {
Location pos = patternClause.pos;
BLangSimpleVarRef varRef = ASTBuilderUtil.createVariableRef(pos, varSymbol);
if (NodeKind.MATCH_STATIC_PATTERN_CLAUSE == patternClause.getKind()) {
BLangMatchStaticBindingPatternClause pattern = (BLangMatchStaticBindingPatternClause) patternClause;
return createBinaryExpression(pos, varRef, pattern.literal);
}
if (NodeKind.MATCH_STRUCTURED_PATTERN_CLAUSE == patternClause.getKind()) {
return createIsLikeExpression(pos, ASTBuilderUtil.createVariableRef(pos, varSymbol), patternType);
}
if (patternType == symTable.nilType) {
BLangLiteral bLangLiteral = ASTBuilderUtil.createLiteral(pos, symTable.nilType, null);
return ASTBuilderUtil.createBinaryExpr(pos, varRef, bLangLiteral, symTable.booleanType,
OperatorKind.EQUAL, (BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.EQUAL,
symTable.anyType, symTable.nilType));
} else {
return createIsAssignableExpression(pos, varSymbol, patternType);
}
}
private BLangExpression createBinaryExpression(Location pos, BLangSimpleVarRef varRef,
BLangExpression expression) {
BLangBinaryExpr binaryExpr;
if (NodeKind.GROUP_EXPR == expression.getKind()) {
return createBinaryExpression(pos, varRef, ((BLangGroupExpr) expression).expression);
}
if (NodeKind.BINARY_EXPR == expression.getKind()) {
binaryExpr = (BLangBinaryExpr) expression;
BLangExpression lhsExpr = createBinaryExpression(pos, varRef, binaryExpr.lhsExpr);
BLangExpression rhsExpr = createBinaryExpression(pos, varRef, binaryExpr.rhsExpr);
binaryExpr = ASTBuilderUtil.createBinaryExpr(pos, lhsExpr, rhsExpr, symTable.booleanType, OperatorKind.OR,
(BOperatorSymbol) symResolver
.resolveBinaryOperator(OperatorKind.OR, symTable.booleanType, symTable.booleanType));
} else if (expression.getKind() == NodeKind.SIMPLE_VARIABLE_REF
&& ((BLangSimpleVarRef) expression).variableName.value.equals(IGNORE.value)) {
BLangValueType anyType = (BLangValueType) TreeBuilder.createValueTypeNode();
anyType.type = symTable.anyType;
anyType.typeKind = TypeKind.ANY;
return ASTBuilderUtil.createTypeTestExpr(pos, varRef, anyType);
} else {
binaryExpr = ASTBuilderUtil
.createBinaryExpr(pos, varRef, expression, symTable.booleanType, OperatorKind.EQUAL, null);
BSymbol opSymbol = symResolver.resolveBinaryOperator(OperatorKind.EQUAL, varRef.type, expression.type);
if (opSymbol == symTable.notFoundSymbol) {
opSymbol = symResolver
.getBinaryEqualityForTypeSets(OperatorKind.EQUAL, symTable.anydataType, expression.type,
binaryExpr);
}
binaryExpr.opSymbol = (BOperatorSymbol) opSymbol;
}
return binaryExpr;
}
private BLangIsAssignableExpr createIsAssignableExpression(Location pos,
BVarSymbol varSymbol,
BType patternType) {
BLangSimpleVarRef varRef = ASTBuilderUtil.createVariableRef(pos, varSymbol);
return ASTBuilderUtil.createIsAssignableExpr(pos, varRef, patternType, symTable.booleanType, names,
symTable.builtinPos);
}
private BLangIsLikeExpr createIsLikeExpression(Location pos, BLangExpression expr, BType type) {
return ASTBuilderUtil.createIsLikeExpr(pos, expr, ASTBuilderUtil.createTypeNode(type), symTable.booleanType);
}
private BLangAssignment createAssignmentStmt(BLangSimpleVariable variable) {
BLangSimpleVarRef varRef = (BLangSimpleVarRef) TreeBuilder.createSimpleVariableReferenceNode();
varRef.pos = variable.pos;
varRef.variableName = variable.name;
varRef.symbol = variable.symbol;
varRef.type = variable.type;
BLangAssignment assignmentStmt = (BLangAssignment) TreeBuilder.createAssignmentNode();
assignmentStmt.expr = variable.expr;
assignmentStmt.pos = variable.pos;
assignmentStmt.setVariable(varRef);
return assignmentStmt;
}
private BLangAssignment createStructFieldUpdate(BLangFunction function, BLangSimpleVariable variable,
BVarSymbol selfSymbol) {
return createStructFieldUpdate(function, variable.expr, variable.symbol, variable.type, selfSymbol,
variable.name);
}
private BLangAssignment createStructFieldUpdate(BLangFunction function, BLangExpression expr,
BVarSymbol fieldSymbol, BType fieldType, BVarSymbol selfSymbol,
BLangIdentifier fieldName) {
BLangSimpleVarRef selfVarRef = ASTBuilderUtil.createVariableRef(function.pos, selfSymbol);
BLangFieldBasedAccess fieldAccess = ASTBuilderUtil.createFieldAccessExpr(selfVarRef, fieldName);
fieldAccess.symbol = fieldSymbol;
fieldAccess.type = fieldType;
fieldAccess.isStoreOnCreation = true;
BLangAssignment assignmentStmt = (BLangAssignment) TreeBuilder.createAssignmentNode();
assignmentStmt.expr = expr;
assignmentStmt.pos = function.pos;
assignmentStmt.setVariable(fieldAccess);
SymbolEnv initFuncEnv = SymbolEnv.createFunctionEnv(function, function.symbol.scope, env);
return rewrite(assignmentStmt, initFuncEnv);
}
private void addMatchExprDefaultCase(BLangMatchExpression bLangMatchExpression) {
List<BType> exprTypes;
List<BType> unmatchedTypes = new ArrayList<>();
if (bLangMatchExpression.expr.type.tag == TypeTags.UNION) {
BUnionType unionType = (BUnionType) bLangMatchExpression.expr.type;
exprTypes = new ArrayList<>(unionType.getMemberTypes());
} else {
exprTypes = Lists.of(bLangMatchExpression.type);
}
for (BType type : exprTypes) {
boolean assignable = false;
for (BLangMatchExprPatternClause pattern : bLangMatchExpression.patternClauses) {
if (this.types.isAssignable(type, pattern.variable.type)) {
assignable = true;
break;
}
}
if (!assignable) {
unmatchedTypes.add(type);
}
}
if (unmatchedTypes.isEmpty()) {
return;
}
BType defaultPatternType;
if (unmatchedTypes.size() == 1) {
defaultPatternType = unmatchedTypes.get(0);
} else {
defaultPatternType = BUnionType.create(null, new LinkedHashSet<>(unmatchedTypes));
}
String patternCaseVarName = GEN_VAR_PREFIX.value + "t_match_default";
BLangSimpleVariable patternMatchCaseVar =
ASTBuilderUtil.createVariable(bLangMatchExpression.pos, patternCaseVarName, defaultPatternType, null,
new BVarSymbol(0, names.fromString(patternCaseVarName),
this.env.scope.owner.pkgID, defaultPatternType,
this.env.scope.owner, bLangMatchExpression.pos, VIRTUAL));
BLangMatchExprPatternClause defaultPattern =
(BLangMatchExprPatternClause) TreeBuilder.createMatchExpressionPattern();
defaultPattern.variable = patternMatchCaseVar;
defaultPattern.expr = ASTBuilderUtil.createVariableRef(bLangMatchExpression.pos, patternMatchCaseVar.symbol);
defaultPattern.pos = bLangMatchExpression.pos;
bLangMatchExpression.patternClauses.add(defaultPattern);
}
private boolean safeNavigate(BLangAccessExpression accessExpr) {
if (accessExpr.lhsVar || accessExpr.expr == null) {
return false;
}
if (accessExpr.errorSafeNavigation || accessExpr.nilSafeNavigation) {
return true;
}
NodeKind kind = accessExpr.expr.getKind();
if (kind == NodeKind.FIELD_BASED_ACCESS_EXPR ||
kind == NodeKind.INDEX_BASED_ACCESS_EXPR) {
return safeNavigate((BLangAccessExpression) accessExpr.expr);
}
return false;
}
private BLangExpression rewriteSafeNavigationExpr(BLangAccessExpression accessExpr) {
BType originalExprType = accessExpr.type;
String matchTempResultVarName = GEN_VAR_PREFIX.value + "temp_result";
BLangSimpleVariable tempResultVar =
ASTBuilderUtil.createVariable(accessExpr.pos, matchTempResultVarName, accessExpr.type, null,
new BVarSymbol(0, names.fromString(matchTempResultVarName),
this.env.scope.owner.pkgID, accessExpr.type,
this.env.scope.owner, accessExpr.pos, VIRTUAL));
BLangSimpleVariableDef tempResultVarDef = ASTBuilderUtil.createVariableDef(accessExpr.pos, tempResultVar);
BLangVariableReference tempResultVarRef =
ASTBuilderUtil.createVariableRef(accessExpr.pos, tempResultVar.symbol);
handleSafeNavigation(accessExpr, accessExpr.type, tempResultVar);
BLangMatch matchExpr = this.matchStmtStack.firstElement();
BLangBlockStmt blockStmt =
ASTBuilderUtil.createBlockStmt(accessExpr.pos, Lists.of(tempResultVarDef, matchExpr));
BLangStatementExpression stmtExpression = createStatementExpression(blockStmt, tempResultVarRef);
stmtExpression.type = originalExprType;
this.matchStmtStack = new Stack<>();
this.accessExprStack = new Stack<>();
this.successPattern = null;
this.safeNavigationAssignment = null;
return stmtExpression;
}
private void handleSafeNavigation(BLangAccessExpression accessExpr, BType type, BLangSimpleVariable tempResultVar) {
if (accessExpr.expr == null) {
return;
}
NodeKind kind = accessExpr.expr.getKind();
if (kind == NodeKind.FIELD_BASED_ACCESS_EXPR || kind == NodeKind.INDEX_BASED_ACCESS_EXPR) {
handleSafeNavigation((BLangAccessExpression) accessExpr.expr, type, tempResultVar);
}
if (!(accessExpr.errorSafeNavigation || accessExpr.nilSafeNavigation)) {
BType originalType = accessExpr.originalType;
if (TypeTags.isXMLTypeTag(originalType.tag)) {
accessExpr.type = BUnionType.create(null, originalType, symTable.errorType);
} else {
accessExpr.type = originalType;
}
if (this.safeNavigationAssignment != null) {
this.safeNavigationAssignment.expr = addConversionExprIfRequired(accessExpr, tempResultVar.type);
}
return;
}
/*
* If the field access is a safe navigation, create a match expression.
* Then chain the current expression as the success-pattern of the parent
* match expr, if available.
* eg:
* x but { <--- parent match expr
* error e => e,
* T t => t.y but { <--- current expr
* error e => e,
* R r => r.z
* }
* }
*/
BLangMatch matchStmt = ASTBuilderUtil.createMatchStatement(accessExpr.pos, accessExpr.expr, new ArrayList<>());
boolean isAllTypesRecords = false;
LinkedHashSet<BType> memTypes = new LinkedHashSet<>();
if (accessExpr.expr.type.tag == TypeTags.UNION) {
memTypes = new LinkedHashSet<>(((BUnionType) accessExpr.expr.type).getMemberTypes());
isAllTypesRecords = isAllTypesAreRecordsInUnion(memTypes);
}
if (accessExpr.nilSafeNavigation) {
matchStmt.patternClauses.add(getMatchNullPattern(accessExpr, tempResultVar));
matchStmt.type = type;
memTypes.remove(symTable.nilType);
}
if (accessExpr.errorSafeNavigation) {
matchStmt.patternClauses.add(getMatchErrorPattern(accessExpr, tempResultVar));
matchStmt.type = type;
matchStmt.pos = accessExpr.pos;
memTypes.remove(symTable.errorType);
}
BLangMatchTypedBindingPatternClause successPattern = null;
Name field = getFieldName(accessExpr);
if (field == Names.EMPTY) {
successPattern = getSuccessPattern(accessExpr.expr.type, accessExpr, tempResultVar,
accessExpr.errorSafeNavigation);
matchStmt.patternClauses.add(successPattern);
pushToMatchStatementStack(matchStmt, accessExpr, successPattern);
return;
}
if (isAllTypesRecords) {
for (BType memberType : memTypes) {
if (((BRecordType) memberType).fields.containsKey(field.value)) {
successPattern = getSuccessPattern(memberType, accessExpr, tempResultVar,
accessExpr.errorSafeNavigation);
matchStmt.patternClauses.add(successPattern);
}
}
matchStmt.patternClauses.add(getMatchAllAndNilReturnPattern(accessExpr, tempResultVar));
pushToMatchStatementStack(matchStmt, accessExpr, successPattern);
return;
}
successPattern =
getSuccessPattern(accessExpr.expr.type, accessExpr, tempResultVar, accessExpr.errorSafeNavigation);
matchStmt.patternClauses.add(successPattern);
pushToMatchStatementStack(matchStmt, accessExpr, successPattern);
}
private void pushToMatchStatementStack(BLangMatch matchStmt, BLangAccessExpression accessExpr,
BLangMatchTypedBindingPatternClause successPattern) {
this.matchStmtStack.push(matchStmt);
if (this.successPattern != null) {
this.successPattern.body = ASTBuilderUtil.createBlockStmt(accessExpr.pos, Lists.of(matchStmt));
}
this.successPattern = successPattern;
}
private Name getFieldName(BLangAccessExpression accessExpr) {
Name field = Names.EMPTY;
if (accessExpr.getKind() == NodeKind.FIELD_BASED_ACCESS_EXPR) {
field = new Name(((BLangFieldBasedAccess) accessExpr).field.value);
} else if (accessExpr.getKind() == NodeKind.INDEX_BASED_ACCESS_EXPR) {
BLangExpression indexBasedExpression = ((BLangIndexBasedAccess) accessExpr).indexExpr;
if (indexBasedExpression.getKind() == NodeKind.LITERAL) {
field = new Name(((BLangLiteral) indexBasedExpression).value.toString());
}
}
return field;
}
private boolean isAllTypesAreRecordsInUnion(LinkedHashSet<BType> memTypes) {
for (BType memType : memTypes) {
int typeTag = memType.tag;
if (typeTag != TypeTags.RECORD && typeTag != TypeTags.ERROR && typeTag != TypeTags.NIL) {
return false;
}
}
return true;
}
private BLangMatchTypedBindingPatternClause getMatchErrorPattern(BLangExpression expr,
BLangSimpleVariable tempResultVar) {
String errorPatternVarName = GEN_VAR_PREFIX.value + "t_match_error";
BLangSimpleVariable errorPatternVar =
ASTBuilderUtil.createVariable(expr.pos, errorPatternVarName, symTable.errorType, null,
new BVarSymbol(0, names.fromString(errorPatternVarName),
this.env.scope.owner.pkgID, symTable.errorType,
this.env.scope.owner, expr.pos, VIRTUAL));
BLangSimpleVarRef assignmentRhsExpr = ASTBuilderUtil.createVariableRef(expr.pos, errorPatternVar.symbol);
BLangVariableReference tempResultVarRef = ASTBuilderUtil.createVariableRef(expr.pos, tempResultVar.symbol);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(expr.pos, tempResultVarRef, assignmentRhsExpr, false);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(expr.pos, Lists.of(assignmentStmt));
BLangMatchTypedBindingPatternClause errorPattern = ASTBuilderUtil
.createMatchStatementPattern(expr.pos, errorPatternVar, patternBody);
return errorPattern;
}
private BLangMatchExprPatternClause getMatchNullPatternGivenExpression(Location pos,
BLangExpression expr) {
String nullPatternVarName = IGNORE.toString();
BLangSimpleVariable errorPatternVar =
ASTBuilderUtil.createVariable(pos, nullPatternVarName, symTable.nilType, null,
new BVarSymbol(0, names.fromString(nullPatternVarName),
this.env.scope.owner.pkgID, symTable.nilType,
this.env.scope.owner, pos, VIRTUAL));
BLangMatchExprPatternClause nullPattern =
(BLangMatchExprPatternClause) TreeBuilder.createMatchExpressionPattern();
nullPattern.variable = errorPatternVar;
nullPattern.expr = expr;
nullPattern.pos = pos;
return nullPattern;
}
private BLangMatchTypedBindingPatternClause getMatchNullPattern(BLangExpression expr,
BLangSimpleVariable tempResultVar) {
String nullPatternVarName = GEN_VAR_PREFIX.value + "t_match_null";
BLangSimpleVariable nullPatternVar =
ASTBuilderUtil.createVariable(expr.pos, nullPatternVarName, symTable.nilType, null,
new BVarSymbol(0, names.fromString(nullPatternVarName),
this.env.scope.owner.pkgID, symTable.nilType,
this.env.scope.owner, expr.pos, VIRTUAL));
BLangSimpleVarRef assignmentRhsExpr = ASTBuilderUtil.createVariableRef(expr.pos, nullPatternVar.symbol);
BLangVariableReference tempResultVarRef = ASTBuilderUtil.createVariableRef(expr.pos, tempResultVar.symbol);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(expr.pos, tempResultVarRef, assignmentRhsExpr, false);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(expr.pos, Lists.of(assignmentStmt));
BLangMatchTypedBindingPatternClause nullPattern = ASTBuilderUtil
.createMatchStatementPattern(expr.pos, nullPatternVar, patternBody);
return nullPattern;
}
private BLangMatchStaticBindingPatternClause getMatchAllAndNilReturnPattern(BLangExpression expr,
BLangSimpleVariable tempResultVar) {
BLangVariableReference tempResultVarRef = ASTBuilderUtil.createVariableRef(expr.pos, tempResultVar.symbol);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(expr.pos, tempResultVarRef, createLiteral(expr.pos,
symTable.nilType, Names.NIL_VALUE), false);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(expr.pos, Lists.of(assignmentStmt));
BLangMatchStaticBindingPatternClause matchAllPattern =
(BLangMatchStaticBindingPatternClause) TreeBuilder.createMatchStatementStaticBindingPattern();
String matchAllVarName = "_";
matchAllPattern.literal =
ASTBuilderUtil.createVariableRef(expr.pos, new BVarSymbol(0, names.fromString(matchAllVarName),
this.env.scope.owner.pkgID, symTable.anyType,
this.env.scope.owner, expr.pos, VIRTUAL));
matchAllPattern.body = patternBody;
return matchAllPattern;
}
private BLangMatchTypedBindingPatternClause getSuccessPattern(BType type, BLangAccessExpression accessExpr,
BLangSimpleVariable tempResultVar,
boolean liftError) {
type = types.getSafeType(type, true, liftError);
String successPatternVarName = GEN_VAR_PREFIX.value + "t_match_success";
BVarSymbol successPatternSymbol;
if (type.tag == TypeTags.INVOKABLE) {
successPatternSymbol = new BInvokableSymbol(SymTag.VARIABLE, 0, names.fromString(successPatternVarName),
this.env.scope.owner.pkgID, type, this.env.scope.owner,
accessExpr.pos, VIRTUAL);
} else {
successPatternSymbol = new BVarSymbol(0, names.fromString(successPatternVarName),
this.env.scope.owner.pkgID, type, this.env.scope.owner,
accessExpr.pos, VIRTUAL);
}
BLangSimpleVariable successPatternVar = ASTBuilderUtil.createVariable(accessExpr.pos, successPatternVarName,
type, null, successPatternSymbol);
BLangAccessExpression tempAccessExpr = nodeCloner.clone(accessExpr);
if (accessExpr.getKind() == NodeKind.INDEX_BASED_ACCESS_EXPR) {
((BLangIndexBasedAccess) tempAccessExpr).indexExpr = ((BLangIndexBasedAccess) accessExpr).indexExpr;
}
if (accessExpr instanceof BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess) {
((BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess) tempAccessExpr).nsSymbol =
((BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess) accessExpr).nsSymbol;
}
tempAccessExpr.expr = ASTBuilderUtil.createVariableRef(accessExpr.pos, successPatternVar.symbol);
tempAccessExpr.errorSafeNavigation = false;
tempAccessExpr.nilSafeNavigation = false;
accessExpr.cloneRef = null;
if (TypeTags.isXMLTypeTag(tempAccessExpr.expr.type.tag)) {
tempAccessExpr.type = BUnionType.create(null, accessExpr.originalType, symTable.errorType,
symTable.nilType);
} else {
tempAccessExpr.type = accessExpr.originalType;
}
tempAccessExpr.optionalFieldAccess = accessExpr.optionalFieldAccess;
BLangVariableReference tempResultVarRef =
ASTBuilderUtil.createVariableRef(accessExpr.pos, tempResultVar.symbol);
BLangExpression assignmentRhsExpr = addConversionExprIfRequired(tempAccessExpr, tempResultVarRef.type);
BLangAssignment assignmentStmt =
ASTBuilderUtil.createAssignmentStmt(accessExpr.pos, tempResultVarRef, assignmentRhsExpr, false);
BLangBlockStmt patternBody = ASTBuilderUtil.createBlockStmt(accessExpr.pos, Lists.of(assignmentStmt));
BLangMatchTypedBindingPatternClause successPattern =
ASTBuilderUtil.createMatchStatementPattern(accessExpr.pos, successPatternVar, patternBody);
this.safeNavigationAssignment = assignmentStmt;
return successPattern;
}
private boolean safeNavigateLHS(BLangExpression expr) {
if (expr.getKind() != NodeKind.FIELD_BASED_ACCESS_EXPR && expr.getKind() != NodeKind.INDEX_BASED_ACCESS_EXPR) {
return false;
}
BLangExpression varRef = ((BLangAccessExpression) expr).expr;
if (varRef.type.isNullable()) {
return true;
}
return safeNavigateLHS(varRef);
}
private BLangStatement rewriteSafeNavigationAssignment(BLangAccessExpression accessExpr, BLangExpression rhsExpr,
boolean safeAssignment) {
this.accessExprStack = new Stack<>();
List<BLangStatement> stmts = new ArrayList<>();
createLHSSafeNavigation(stmts, accessExpr.expr);
BLangAssignment assignment = ASTBuilderUtil.createAssignmentStmt(accessExpr.pos,
cloneExpression(accessExpr), rhsExpr);
stmts.add(assignment);
return ASTBuilderUtil.createBlockStmt(accessExpr.pos, stmts);
}
private void createLHSSafeNavigation(List<BLangStatement> stmts, BLangExpression expr) {
NodeKind kind = expr.getKind();
boolean root = false;
if (kind == NodeKind.FIELD_BASED_ACCESS_EXPR || kind == NodeKind.INDEX_BASED_ACCESS_EXPR ||
kind == NodeKind.INVOCATION) {
BLangAccessExpression accessExpr = (BLangAccessExpression) expr;
createLHSSafeNavigation(stmts, accessExpr.expr);
accessExpr.expr = accessExprStack.pop();
} else {
root = true;
}
if (expr.getKind() == NodeKind.INVOCATION) {
BLangInvocation invocation = (BLangInvocation) expr;
BVarSymbol interMediateSymbol = new BVarSymbol(0,
names.fromString(GEN_VAR_PREFIX.value + "i_intermediate"),
this.env.scope.owner.pkgID, invocation.type,
this.env.scope.owner, expr.pos, VIRTUAL);
BLangSimpleVariable intermediateVariable = ASTBuilderUtil.createVariable(expr.pos,
interMediateSymbol.name.value,
invocation.type, invocation,
interMediateSymbol);
BLangSimpleVariableDef intermediateVariableDefinition = ASTBuilderUtil.createVariableDef(invocation.pos,
intermediateVariable);
stmts.add(intermediateVariableDefinition);
expr = ASTBuilderUtil.createVariableRef(invocation.pos, interMediateSymbol);
}
if (expr.type.isNullable()) {
BLangTypeTestExpr isNillTest = ASTBuilderUtil.createTypeTestExpr(expr.pos, expr, getNillTypeNode());
isNillTest.type = symTable.booleanType;
BLangBlockStmt thenStmt = ASTBuilderUtil.createBlockStmt(expr.pos);
expr = cloneExpression(expr);
expr.type = types.getSafeType(expr.type, true, false);
if (isDefaultableMappingType(expr.type) && !root) {
BLangRecordLiteral jsonLiteral = (BLangRecordLiteral) TreeBuilder.createRecordLiteralNode();
jsonLiteral.type = expr.type;
jsonLiteral.pos = expr.pos;
BLangAssignment assignment = ASTBuilderUtil.createAssignmentStmt(expr.pos,
expr, jsonLiteral);
thenStmt.addStatement(assignment);
} else {
BLangLiteral literal = (BLangLiteral) TreeBuilder.createLiteralExpression();
literal.value = ERROR_REASON_NULL_REFERENCE_ERROR;
literal.type = symTable.stringType;
BLangErrorConstructorExpr errorConstructorExpr =
(BLangErrorConstructorExpr) TreeBuilder.createErrorConstructorExpressionNode();
BSymbol symbol = symResolver.lookupMainSpaceSymbolInPackage(errorConstructorExpr.pos, env,
names.fromString(""), names.fromString("error"));
errorConstructorExpr.type = symbol.type;
errorConstructorExpr.pos = expr.pos;
List<BLangExpression> positionalArgs = new ArrayList<>();
positionalArgs.add(literal);
errorConstructorExpr.positionalArgs = positionalArgs;
BLangPanic panicNode = (BLangPanic) TreeBuilder.createPanicNode();
panicNode.expr = errorConstructorExpr;
panicNode.pos = expr.pos;
thenStmt.addStatement(panicNode);
}
BLangIf ifelse = ASTBuilderUtil.createIfElseStmt(expr.pos, isNillTest, thenStmt, null);
stmts.add(ifelse);
}
accessExprStack.push(expr);
}
BLangValueType getNillTypeNode() {
BLangValueType nillTypeNode = (BLangValueType) TreeBuilder.createValueTypeNode();
nillTypeNode.typeKind = TypeKind.NIL;
nillTypeNode.type = symTable.nilType;
return nillTypeNode;
}
private BLangAccessExpression cloneExpression(BLangExpression expr) {
switch (expr.getKind()) {
case SIMPLE_VARIABLE_REF:
return ASTBuilderUtil.createVariableRef(expr.pos, ((BLangSimpleVarRef) expr).symbol);
case FIELD_BASED_ACCESS_EXPR:
case INDEX_BASED_ACCESS_EXPR:
case INVOCATION:
return cloneAccessExpr((BLangAccessExpression) expr);
default:
throw new IllegalStateException();
}
}
private BLangAccessExpression cloneAccessExpr(BLangAccessExpression originalAccessExpr) {
if (originalAccessExpr.expr == null) {
return originalAccessExpr;
}
BLangExpression varRef;
NodeKind kind = originalAccessExpr.expr.getKind();
if (kind == NodeKind.FIELD_BASED_ACCESS_EXPR || kind == NodeKind.INDEX_BASED_ACCESS_EXPR ||
kind == NodeKind.INVOCATION) {
varRef = cloneAccessExpr((BLangAccessExpression) originalAccessExpr.expr);
} else {
varRef = cloneExpression(originalAccessExpr.expr);
}
varRef.type = types.getSafeType(originalAccessExpr.expr.type, true, false);
BLangAccessExpression accessExpr;
switch (originalAccessExpr.getKind()) {
case FIELD_BASED_ACCESS_EXPR:
accessExpr = ASTBuilderUtil.createFieldAccessExpr((BLangAccessibleExpression) varRef,
((BLangFieldBasedAccess) originalAccessExpr).field);
break;
case INDEX_BASED_ACCESS_EXPR:
accessExpr = ASTBuilderUtil.createIndexAccessExpr((BLangAccessibleExpression) varRef,
((BLangIndexBasedAccess) originalAccessExpr).indexExpr);
break;
case INVOCATION:
accessExpr = null;
break;
default:
throw new IllegalStateException();
}
accessExpr.originalType = originalAccessExpr.originalType;
accessExpr.pos = originalAccessExpr.pos;
accessExpr.lhsVar = originalAccessExpr.lhsVar;
accessExpr.symbol = originalAccessExpr.symbol;
accessExpr.errorSafeNavigation = false;
accessExpr.nilSafeNavigation = false;
accessExpr.type = originalAccessExpr.originalType;
return accessExpr;
}
private BLangBinaryExpr getModifiedIntRangeStartExpr(BLangExpression expr) {
BLangLiteral constOneLiteral = ASTBuilderUtil.createLiteral(expr.pos, symTable.intType, 1L);
return ASTBuilderUtil.createBinaryExpr(expr.pos, expr, constOneLiteral, symTable.intType, OperatorKind.ADD,
(BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.ADD,
symTable.intType,
symTable.intType));
}
private BLangBinaryExpr getModifiedIntRangeEndExpr(BLangExpression expr) {
BLangLiteral constOneLiteral = ASTBuilderUtil.createLiteral(expr.pos, symTable.intType, 1L);
return ASTBuilderUtil.createBinaryExpr(expr.pos, expr, constOneLiteral, symTable.intType, OperatorKind.SUB,
(BOperatorSymbol) symResolver.resolveBinaryOperator(OperatorKind.SUB,
symTable.intType,
symTable.intType));
}
private BLangLiteral getBooleanLiteral(boolean value) {
BLangLiteral literal = (BLangLiteral) TreeBuilder.createLiteralExpression();
literal.value = value;
literal.type = symTable.booleanType;
literal.pos = symTable.builtinPos;
return literal;
}
private boolean isDefaultableMappingType(BType type) {
switch (types.getSafeType(type, true, false).tag) {
case TypeTags.JSON:
case TypeTags.MAP:
case TypeTags.RECORD:
return true;
default:
return false;
}
}
private BLangFunction createInitFunctionForClassDefn(BLangClassDefinition classDefinition, SymbolEnv env) {
BLangFunction initFunction =
TypeDefBuilderHelper.createInitFunctionForStructureType(classDefinition.pos, classDefinition.symbol,
env, names, Names.GENERATED_INIT_SUFFIX, symTable, classDefinition.type);
BObjectTypeSymbol typeSymbol = ((BObjectTypeSymbol) classDefinition.type.tsymbol);
typeSymbol.generatedInitializerFunc = new BAttachedFunction(Names.GENERATED_INIT_SUFFIX, initFunction.symbol,
(BInvokableType) initFunction.type, classDefinition.pos);
classDefinition.generatedInitFunction = initFunction;
initFunction.returnTypeNode.type = symTable.nilType;
return rewrite(initFunction, env);
}
private void visitBinaryLogicalExpr(BLangBinaryExpr binaryExpr) {
/*
* Desugar (lhsExpr && rhsExpr) to the following if-else:
*
* logical AND:
* -------------
* T $result$;
* if (lhsExpr) {
* $result$ = rhsExpr;
* } else {
* $result$ = false;
* }
*
* logical OR:
* -------------
* T $result$;
* if (lhsExpr) {
* $result$ = true;
* } else {
* $result$ = rhsExpr;
* }
*
*/
BLangSimpleVariableDef resultVarDef = createVarDef("$result$", binaryExpr.type, null, symTable.builtinPos);
BLangBlockStmt thenBody = ASTBuilderUtil.createBlockStmt(binaryExpr.pos);
BLangBlockStmt elseBody = ASTBuilderUtil.createBlockStmt(binaryExpr.pos);
BLangSimpleVarRef thenResultVarRef = ASTBuilderUtil.createVariableRef(symTable.builtinPos,
resultVarDef.var.symbol);
BLangExpression thenResult;
if (binaryExpr.opKind == OperatorKind.AND) {
thenResult = binaryExpr.rhsExpr;
} else {
thenResult = getBooleanLiteral(true);
}
BLangAssignment thenAssignment =
ASTBuilderUtil.createAssignmentStmt(binaryExpr.pos, thenResultVarRef, thenResult);
thenBody.addStatement(thenAssignment);
BLangExpression elseResult;
BLangSimpleVarRef elseResultVarRef = ASTBuilderUtil.createVariableRef(symTable.builtinPos,
resultVarDef.var.symbol);
if (binaryExpr.opKind == OperatorKind.AND) {
elseResult = getBooleanLiteral(false);
} else {
elseResult = binaryExpr.rhsExpr;
}
BLangAssignment elseAssignment =
ASTBuilderUtil.createAssignmentStmt(binaryExpr.pos, elseResultVarRef, elseResult);
elseBody.addStatement(elseAssignment);
BLangSimpleVarRef resultVarRef = ASTBuilderUtil.createVariableRef(binaryExpr.pos, resultVarDef.var.symbol);
BLangIf ifElse = ASTBuilderUtil.createIfElseStmt(binaryExpr.pos, binaryExpr.lhsExpr, thenBody, elseBody);
BLangBlockStmt blockStmt = ASTBuilderUtil.createBlockStmt(binaryExpr.pos, Lists.of(resultVarDef, ifElse));
BLangStatementExpression stmtExpr = createStatementExpression(blockStmt, resultVarRef);
stmtExpr.type = binaryExpr.type;
result = rewriteExpr(stmtExpr);
}
/**
* Split the package init function into several smaller functions.
*
* @param packageNode package node
* @param env symbol environment
* @return initial init function but trimmed in size
*/
private BLangFunction splitInitFunction(BLangPackage packageNode, SymbolEnv env) {
int methodSize = INIT_METHOD_SPLIT_SIZE;
BLangBlockFunctionBody funcBody = (BLangBlockFunctionBody) packageNode.initFunction.body;
if (!isJvmTarget) {
return packageNode.initFunction;
}
BLangFunction initFunction = packageNode.initFunction;
List<BLangFunction> generatedFunctions = new ArrayList<>();
List<BLangStatement> stmts = new ArrayList<>(funcBody.stmts);
funcBody.stmts.clear();
BLangFunction newFunc = initFunction;
BLangBlockFunctionBody newFuncBody = (BLangBlockFunctionBody) newFunc.body;
int varDefIndex = 0;
for (int i = 0; i < stmts.size(); i++) {
BLangStatement statement = stmts.get(i);
if (statement.getKind() == NodeKind.VARIABLE_DEF) {
break;
}
varDefIndex++;
if (i > 0 && (i % methodSize == 0 || isAssignmentWithInitOrRecordLiteralExpr(statement))) {
generatedFunctions.add(newFunc);
newFunc = createIntermediateInitFunction(packageNode, env);
newFuncBody = (BLangBlockFunctionBody) newFunc.body;
symTable.rootScope.define(names.fromIdNode(newFunc.name), newFunc.symbol);
}
newFuncBody.stmts.add(stmts.get(i));
}
List<BLangStatement> chunkStmts = new ArrayList<>();
for (int i = varDefIndex; i < stmts.size(); i++) {
BLangStatement stmt = stmts.get(i);
chunkStmts.add(stmt);
varDefIndex++;
if ((stmt.getKind() == NodeKind.ASSIGNMENT) &&
(((BLangAssignment) stmt).expr.getKind() == NodeKind.SERVICE_CONSTRUCTOR) &&
(newFuncBody.stmts.size() + chunkStmts.size() > methodSize)) {
generatedFunctions.add(newFunc);
newFunc = createIntermediateInitFunction(packageNode, env);
newFuncBody = (BLangBlockFunctionBody) newFunc.body;
symTable.rootScope.define(names.fromIdNode(newFunc.name), newFunc.symbol);
newFuncBody.stmts.addAll(chunkStmts);
chunkStmts.clear();
} else if ((stmt.getKind() == NodeKind.ASSIGNMENT) &&
(((BLangAssignment) stmt).varRef instanceof BLangPackageVarRef) &&
Symbols.isFlagOn(((BLangPackageVarRef) ((BLangAssignment) stmt).varRef).varSymbol.flags,
Flags.LISTENER)
) {
break;
}
}
newFuncBody.stmts.addAll(chunkStmts);
for (int i = varDefIndex; i < stmts.size(); i++) {
if (i > 0 && i % methodSize == 0) {
generatedFunctions.add(newFunc);
newFunc = createIntermediateInitFunction(packageNode, env);
newFuncBody = (BLangBlockFunctionBody) newFunc.body;
symTable.rootScope.define(names.fromIdNode(newFunc.name), newFunc.symbol);
}
newFuncBody.stmts.add(stmts.get(i));
}
generatedFunctions.add(newFunc);
for (int j = 0; j < generatedFunctions.size() - 1; j++) {
BLangFunction thisFunction = generatedFunctions.get(j);
BLangCheckedExpr checkedExpr =
ASTBuilderUtil.createCheckExpr(initFunction.pos,
createInvocationNode(generatedFunctions.get(j + 1).name.value,
new ArrayList<>(), symTable.errorOrNilType),
symTable.nilType);
checkedExpr.equivalentErrorTypeList.add(symTable.errorType);
BLangExpressionStmt expressionStmt = ASTBuilderUtil
.createExpressionStmt(thisFunction.pos, (BLangBlockFunctionBody) thisFunction.body);
expressionStmt.expr = checkedExpr;
expressionStmt.expr.pos = initFunction.pos;
if (j > 0) {
thisFunction = rewrite(thisFunction, env);
packageNode.functions.add(thisFunction);
packageNode.topLevelNodes.add(thisFunction);
}
}
if (generatedFunctions.size() > 1) {
BLangFunction lastFunc = generatedFunctions.get(generatedFunctions.size() - 1);
lastFunc = rewrite(lastFunc, env);
packageNode.functions.add(lastFunc);
packageNode.topLevelNodes.add(lastFunc);
}
return generatedFunctions.get(0);
}
private boolean isAssignmentWithInitOrRecordLiteralExpr(BLangStatement statement) {
if (statement.getKind() == NodeKind.ASSIGNMENT) {
NodeKind exprKind = ((BLangAssignment) statement).getExpression().getKind();
return exprKind == NodeKind.TYPE_INIT_EXPR || exprKind == NodeKind.RECORD_LITERAL_EXPR;
}
return false;
}
/**
* Create an intermediate package init function.
*
* @param pkgNode package node
* @param env symbol environment of package
*/
private BLangFunction createIntermediateInitFunction(BLangPackage pkgNode, SymbolEnv env) {
String alias = pkgNode.symbol.pkgID.toString();
BLangFunction initFunction = ASTBuilderUtil
.createInitFunctionWithErrorOrNilReturn(pkgNode.pos, alias,
new Name(Names.INIT_FUNCTION_SUFFIX.value
+ this.initFuncIndex++), symTable);
createInvokableSymbol(initFunction, env);
return initFunction;
}
private BType getRestType(BInvokableSymbol invokableSymbol) {
if (invokableSymbol != null && invokableSymbol.restParam != null) {
return invokableSymbol.restParam.type;
}
return null;
}
private BType getRestType(BLangFunction function) {
if (function != null && function.restParam != null) {
return function.restParam.type;
}
return null;
}
private BVarSymbol getRestSymbol(BLangFunction function) {
if (function != null && function.restParam != null) {
return function.restParam.symbol;
}
return null;
}
private boolean isComputedKey(RecordLiteralNode.RecordField field) {
if (!field.isKeyValueField()) {
return false;
}
return ((BLangRecordLiteral.BLangRecordKeyValueField) field).key.computedKey;
}
private BLangRecordLiteral rewriteMappingConstructor(BLangRecordLiteral mappingConstructorExpr) {
List<RecordLiteralNode.RecordField> fields = mappingConstructorExpr.fields;
BType type = mappingConstructorExpr.type;
Location pos = mappingConstructorExpr.pos;
List<RecordLiteralNode.RecordField> rewrittenFields = new ArrayList<>(fields.size());
for (RecordLiteralNode.RecordField field : fields) {
if (field.isKeyValueField()) {
BLangRecordLiteral.BLangRecordKeyValueField keyValueField =
(BLangRecordLiteral.BLangRecordKeyValueField) field;
BLangRecordLiteral.BLangRecordKey key = keyValueField.key;
BLangExpression origKey = key.expr;
BLangExpression keyExpr;
if (key.computedKey) {
keyExpr = origKey;
} else {
keyExpr = origKey.getKind() == NodeKind.SIMPLE_VARIABLE_REF ? createStringLiteral(pos,
StringEscapeUtils.unescapeJava(((BLangSimpleVarRef) origKey).variableName.value)) :
((BLangLiteral) origKey);
}
BLangRecordLiteral.BLangRecordKeyValueField rewrittenField =
ASTBuilderUtil.createBLangRecordKeyValue(rewriteExpr(keyExpr),
rewriteExpr(keyValueField.valueExpr));
rewrittenField.pos = keyValueField.pos;
rewrittenField.key.pos = key.pos;
rewrittenFields.add(rewrittenField);
} else if (field.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
BLangSimpleVarRef varRefField = (BLangSimpleVarRef) field;
rewrittenFields.add(ASTBuilderUtil.createBLangRecordKeyValue(
rewriteExpr(createStringLiteral(pos,
StringEscapeUtils.unescapeJava(varRefField.variableName.value))),
rewriteExpr(varRefField)));
} else {
BLangRecordLiteral.BLangRecordSpreadOperatorField spreadOpField =
(BLangRecordLiteral.BLangRecordSpreadOperatorField) field;
spreadOpField.expr = rewriteExpr(spreadOpField.expr);
rewrittenFields.add(spreadOpField);
}
}
fields.clear();
return type.tag == TypeTags.RECORD ? new BLangStructLiteral(pos, type, rewrittenFields) :
new BLangMapLiteral(pos, type, rewrittenFields);
}
protected void addTransactionInternalModuleImport() {
PackageID packageID = new PackageID(Names.BALLERINA_INTERNAL_ORG, Lists.of(Names.TRANSACTION),
Names.TRANSACTION_INTERNAL_VERSION);
if (!env.enclPkg.packageID.equals(packageID)) {
BLangImportPackage importDcl = (BLangImportPackage) TreeBuilder.createImportPackageNode();
List<BLangIdentifier> pkgNameComps = new ArrayList<>();
pkgNameComps.add(ASTBuilderUtil.createIdentifier(env.enclPkg.pos, Names.TRANSACTION.value));
importDcl.pkgNameComps = pkgNameComps;
importDcl.pos = env.enclPkg.symbol.pos;
importDcl.orgName = ASTBuilderUtil.createIdentifier(env.enclPkg.pos, Names.BALLERINA_INTERNAL_ORG.value);
importDcl.alias = ASTBuilderUtil.createIdentifier(env.enclPkg.pos, "trx");
importDcl.version = ASTBuilderUtil.createIdentifier(env.enclPkg.pos, "");
importDcl.symbol = symTable.internalTransactionModuleSymbol;
env.enclPkg.imports.add(importDcl);
env.enclPkg.symbol.imports.add(importDcl.symbol);
}
}
}
|
/**
 * Creates the generated initializer function for the given class definition.
 *
 * @param classDefinition The class definition node for which the initializer is created
* @param env The env for the type node
* @return The generated initializer method
*/
private BLangFunction createGeneratedInitializerFunction(BLangClassDefinition classDefinition, SymbolEnv env) {
BLangFunction generatedInitFunc = createInitFunctionForClassDefn(classDefinition, env);
if (classDefinition.initFunction == null) {
return generatedInitFunc;
}
return wireUpGeneratedInitFunction(generatedInitFunc,
(BObjectTypeSymbol) classDefinition.symbol, classDefinition.initFunction);
}
|
/**
 * Creates the generated initializer function for the given class definition.
 *
 * @param classDefinition The class definition node for which the initializer is created
* @param env The env for the type node
* @return The generated initializer method
*/
private BLangFunction createGeneratedInitializerFunction(BLangClassDefinition classDefinition, SymbolEnv env) {
BLangFunction generatedInitFunc = createInitFunctionForClassDefn(classDefinition, env);
if (classDefinition.initFunction == null) {
return generatedInitFunc;
}
return wireUpGeneratedInitFunction(generatedInitFunc,
(BObjectTypeSymbol) classDefinition.symbol, classDefinition.initFunction);
}
|
At the moment `close` actually does nothing, but the proposed semantics of `Collector#close` would flush buffered data. If so, I wonder whether calling `close` before returning might carry a risk, should the `close` implementation later be changed to clear the list.
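A minimal sketch of the concern, using hypothetical stand-ins for the Flink types (BufferingCollector is an assumed name, not the real TimestampedCollector): if close() is ever changed to clear the buffer, closing before returning the shared list would silently discard the collected elements.

import java.util.ArrayList;
import java.util.List;

class BufferingCollector<T> {
    private final List<T> buffer;

    BufferingCollector(List<T> buffer) {
        this.buffer = buffer;
    }

    void collect(T element) {
        buffer.add(element);
    }

    // Today: effectively a no-op. Hypothetical future behaviour: flush and clear.
    void close() {
        buffer.clear();
    }
}

class CloseBeforeReturnDemo {
    static List<Integer> popCompleted() {
        List<Integer> completed = new ArrayList<>();
        BufferingCollector<Integer> collector = new BufferingCollector<>(completed);
        collector.collect(1);
        collector.close(); // with the hypothetical close(), 'completed' is now empty
        return completed;  // the caller receives an empty list instead of [1]
    }
}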
|
static List<StreamElement> popCompleted(StreamElementQueue<Integer> queue) {
final List<StreamElement> completed = new ArrayList<>();
TimestampedCollector<Integer> collector = new TimestampedCollector<>(new CollectorOutput<>(completed));
while (queue.hasCompletedElements()) {
queue.emitCompletedElement(collector);
}
collector.close();
return completed;
}
|
collector.close();
|
static List<StreamElement> popCompleted(StreamElementQueue<Integer> queue) {
final List<StreamElement> completed = new ArrayList<>();
TimestampedCollector<Integer> collector = new TimestampedCollector<>(new CollectorOutput<>(completed));
while (queue.hasCompletedElements()) {
queue.emitCompletedElement(collector);
}
collector.close();
return completed;
}
|
class QueueUtil {
static ResultFuture<Integer> putSucessfully(StreamElementQueue<Integer> queue, StreamElement streamElement) {
Optional<ResultFuture<Integer>> resultFuture = queue.tryPut(streamElement);
assertTrue(resultFuture.isPresent());
return resultFuture.get();
}
static void putUnsucessfully(StreamElementQueue<Integer> queue, StreamElement streamElement) {
Optional<ResultFuture<Integer>> resultFuture = queue.tryPut(streamElement);
assertFalse(resultFuture.isPresent());
}
/**
* Pops all completed elements from the head of this queue.
*
* @return Completed elements or empty list of none exists.
*/
}
|
class QueueUtil {
static ResultFuture<Integer> putSuccessfully(StreamElementQueue<Integer> queue, StreamElement streamElement) {
Optional<ResultFuture<Integer>> resultFuture = queue.tryPut(streamElement);
assertTrue(resultFuture.isPresent());
return resultFuture.get();
}
static void putUnsuccessfully(StreamElementQueue<Integer> queue, StreamElement streamElement) {
Optional<ResultFuture<Integer>> resultFuture = queue.tryPut(streamElement);
assertFalse(resultFuture.isPresent());
}
/**
* Pops all completed elements from the head of this queue.
*
* @return Completed elements or empty list if none exists.
*/
}
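For context, a hedged usage sketch of the helpers documented above, pairing putSuccessfully with popCompleted in a test; the queue type, capacity, and element values are assumptions about the surrounding test setup, not verbatim from this record.

// Put an element, complete its future, then drain the completed elements.
static void usageExample() {
    StreamElementQueue<Integer> queue = new OrderedStreamElementQueue<>(4);
    ResultFuture<Integer> future = QueueUtil.putSuccessfully(queue, new StreamRecord<>(1, 0L));
    future.complete(Collections.singletonList(42)); // marks the element as completed
    List<StreamElement> completed = QueueUtil.popCompleted(queue); // drains the completed record
}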
|
Should RegistryClientMixin be checking/handling this config file property for all cases (not just Gradle)?
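One possible shape for that, sketched with assumed names (getConfigFileProperty is hypothetical, not the actual Quarkus CLI API): resolve the property once inside RegistryClientMixin so every BuildSystemRunner can reuse it instead of duplicating the Gradle-only logic.

// Hypothetical helper on RegistryClientMixin; CONFIG_FILE_PATH_PROPERTY stands in
// for RegistriesConfigLocator.CONFIG_FILE_PATH_PROPERTY.
String getConfigFileProperty() {
    String configFile = getConfigArg() == null
            ? System.getProperty(CONFIG_FILE_PATH_PROPERTY)
            : getConfigArg();
    return configFile == null
            ? null
            : "-D" + CONFIG_FILE_PATH_PROPERTY + "=" + configFile;
}

// Callers (Gradle, Maven, ...) would then do:
// String prop = registryClient.getConfigFileProperty();
// if (prop != null) {
//     args.add(prop);
// }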
|
void setGradleProperties(ArrayDeque<String> args, boolean batchMode) {
if (output.isShowErrors()) {
args.add("--full-stacktrace");
}
if (batchMode) {
args.add("--console=plain");
} else if (output.isAnsiEnabled()) {
args.add("--console=rich");
}
if (output.isCliTest()) {
args.add("--project-dir=" + projectRoot.toAbsolutePath());
}
args.add(registryClient.getRegistryClientProperty());
final String configFile = registryClient.getConfigArg() == null
? System.getProperty(RegistriesConfigLocator.CONFIG_FILE_PATH_PROPERTY)
: registryClient.getConfigArg();
if (configFile != null) {
args.add("-D" + RegistriesConfigLocator.CONFIG_FILE_PATH_PROPERTY + "=" + configFile);
}
args.addAll(flattenMappedProperties(propertiesOptions.properties));
}
|
final String configFile = registryClient.getConfigArg() == null
|
void setGradleProperties(ArrayDeque<String> args, boolean batchMode) {
if (output.isShowErrors()) {
args.add("--full-stacktrace");
}
if (batchMode) {
args.add("--console=plain");
} else if (output.isAnsiEnabled()) {
args.add("--console=rich");
}
if (output.isCliTest()) {
args.add("--project-dir=" + projectRoot.toAbsolutePath());
}
args.add(registryClient.getRegistryClientProperty());
final String configFile = registryClient.getConfigArg() == null
? System.getProperty(RegistriesConfigLocator.CONFIG_FILE_PATH_PROPERTY)
: registryClient.getConfigArg();
if (configFile != null) {
args.add("-D" + RegistriesConfigLocator.CONFIG_FILE_PATH_PROPERTY + "=" + configFile);
}
args.addAll(flattenMappedProperties(propertiesOptions.properties));
}
|
class GradleRunner implements BuildSystemRunner {
public static final String[] windowsWrapper = { "gradlew.cmd", "gradlew.bat" };
public static final String otherWrapper = "gradlew";
final OutputOptionMixin output;
final RegistryClientMixin registryClient;
final Path projectRoot;
final BuildTool buildTool;
final PropertiesOptions propertiesOptions;
public GradleRunner(OutputOptionMixin output, PropertiesOptions propertiesOptions, RegistryClientMixin registryClient,
Path projectRoot, BuildTool buildTool) {
this.output = output;
this.projectRoot = projectRoot;
this.buildTool = buildTool;
this.propertiesOptions = propertiesOptions;
this.registryClient = registryClient;
verifyBuildFile();
}
@Override
public File getWrapper() {
return ExecuteUtil.findWrapper(projectRoot, windowsWrapper, otherWrapper);
}
@Override
public File getExecutable() {
return ExecuteUtil.findExecutable("gradle",
"Unable to find the gradle executable, is it in your path?",
output);
}
@Override
public Path getProjectRoot() {
return projectRoot;
}
@Override
public OutputOptionMixin getOutput() {
return output;
}
@Override
public BuildTool getBuildTool() {
return buildTool;
}
@Override
public Integer listExtensionCategories(RunModeOption runMode, CategoryListFormatOptions format) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
args.add("listCategories");
args.add("--fromCli");
args.add("--format=" + format.getFormatString());
return run(prependExecutable(args));
}
@Override
public Integer listExtensions(RunModeOption runMode, ListFormatOptions format, boolean installable, String searchPattern,
String category) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
args.add("listExtensions");
args.add("--fromCli");
args.add("--format=" + format.getFormatString());
if (category != null && !category.isBlank()) {
args.add("--category=" + category);
}
if (!installable) {
args.add("--installed");
}
if (searchPattern != null) {
args.add("--searchPattern=" + searchPattern);
}
return run(prependExecutable(args));
}
@Override
public Integer addExtension(RunModeOption runMode, Set<String> extensions) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
args.add("addExtension");
String param = "--extensions=" + String.join(",", extensions);
args.add(param);
return run(prependExecutable(args));
}
@Override
public Integer removeExtension(RunModeOption runMode, Set<String> extensions) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
args.add("removeExtension");
String param = "--extensions=" + String.join(",", extensions);
args.add(param);
return run(prependExecutable(args));
}
@Override
public BuildCommandArgs prepareBuild(BuildOptions buildOptions, RunModeOption runMode, List<String> params) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
if (buildOptions.clean) {
args.add("clean");
}
args.add("build");
if (buildOptions.buildNative) {
args.add("-Dquarkus.package.type=native");
}
if (buildOptions.skipTests()) {
setSkipTests(args);
}
if (buildOptions.offline) {
args.add("--offline");
}
args.addAll(params);
return prependExecutable(args);
}
@Override
public List<Supplier<BuildCommandArgs>> prepareDevMode(DevOptions devOptions, DebugOptions debugOptions,
List<String> params) {
ArrayDeque<String> args = new ArrayDeque<>();
List<String> jvmArgs = new ArrayList<>();
setGradleProperties(args, false);
if (devOptions.clean) {
args.add("clean");
}
args.add("quarkusDev");
if (devOptions.skipTests()) {
setSkipTests(args);
}
debugOptions.addDebugArguments(args, jvmArgs);
propertiesOptions.flattenJvmArgs(jvmArgs, args);
paramsToQuarkusArgs(params, args);
try {
Path outputFile = Files.createTempFile("quarkus-dev", ".txt");
args.add("-Dio.quarkus.devmode-args=" + outputFile.toAbsolutePath().toString());
BuildCommandArgs buildCommandArgs = prependExecutable(args);
return Arrays.asList(new Supplier<BuildCommandArgs>() {
@Override
public BuildCommandArgs get() {
return buildCommandArgs;
}
}, new Supplier<BuildCommandArgs>() {
@Override
public BuildCommandArgs get() {
try {
List<String> lines = Files.readAllLines(outputFile).stream().filter(s -> !s.isBlank())
.collect(Collectors.toList());
BuildCommandArgs cmd = new BuildCommandArgs();
cmd.arguments = lines.toArray(new String[0]);
cmd.targetDirectory = buildCommandArgs.targetDirectory;
return cmd;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
});
} catch (IOException e) {
throw new RuntimeException(e);
}
}
void setSkipTests(ArrayDeque<String> args) {
args.add("-x");
args.add("test");
}
void verifyBuildFile() {
for (String buildFileName : buildTool.getBuildFiles()) {
File buildFile = projectRoot.resolve(buildFileName).toFile();
if (buildFile.exists()) {
return;
}
}
throw new IllegalStateException("Was not able to find a build file in: " + projectRoot
+ " based on the following list: " + String.join(",", buildTool.getBuildFiles()));
}
}
|
class GradleRunner implements BuildSystemRunner {
public static final String[] windowsWrapper = { "gradlew.cmd", "gradlew.bat" };
public static final String otherWrapper = "gradlew";
final OutputOptionMixin output;
final RegistryClientMixin registryClient;
final Path projectRoot;
final BuildTool buildTool;
final PropertiesOptions propertiesOptions;
public GradleRunner(OutputOptionMixin output, PropertiesOptions propertiesOptions, RegistryClientMixin registryClient,
Path projectRoot, BuildTool buildTool) {
this.output = output;
this.projectRoot = projectRoot;
this.buildTool = buildTool;
this.propertiesOptions = propertiesOptions;
this.registryClient = registryClient;
verifyBuildFile();
}
@Override
public File getWrapper() {
return ExecuteUtil.findWrapper(projectRoot, windowsWrapper, otherWrapper);
}
@Override
public File getExecutable() {
return ExecuteUtil.findExecutable("gradle",
"Unable to find the gradle executable, is it in your path?",
output);
}
@Override
public Path getProjectRoot() {
return projectRoot;
}
@Override
public OutputOptionMixin getOutput() {
return output;
}
@Override
public BuildTool getBuildTool() {
return buildTool;
}
@Override
public Integer listExtensionCategories(RunModeOption runMode, CategoryListFormatOptions format) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
args.add("listCategories");
args.add("--fromCli");
args.add("--format=" + format.getFormatString());
return run(prependExecutable(args));
}
@Override
public Integer listExtensions(RunModeOption runMode, ListFormatOptions format, boolean installable, String searchPattern,
String category) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
args.add("listExtensions");
args.add("--fromCli");
args.add("--format=" + format.getFormatString());
if (category != null && !category.isBlank()) {
args.add("--category=" + category);
}
if (!installable) {
args.add("--installed");
}
if (searchPattern != null) {
args.add("--searchPattern=" + searchPattern);
}
return run(prependExecutable(args));
}
@Override
public Integer addExtension(RunModeOption runMode, Set<String> extensions) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
args.add("addExtension");
String param = "--extensions=" + String.join(",", extensions);
args.add(param);
return run(prependExecutable(args));
}
@Override
public Integer removeExtension(RunModeOption runMode, Set<String> extensions) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
args.add("removeExtension");
String param = "--extensions=" + String.join(",", extensions);
args.add(param);
return run(prependExecutable(args));
}
@Override
public BuildCommandArgs prepareBuild(BuildOptions buildOptions, RunModeOption runMode, List<String> params) {
ArrayDeque<String> args = new ArrayDeque<>();
setGradleProperties(args, runMode.isBatchMode());
if (buildOptions.clean) {
args.add("clean");
}
args.add("build");
if (buildOptions.buildNative) {
args.add("-Dquarkus.package.type=native");
}
if (buildOptions.skipTests()) {
setSkipTests(args);
}
if (buildOptions.offline) {
args.add("--offline");
}
args.addAll(params);
return prependExecutable(args);
}
@Override
public List<Supplier<BuildCommandArgs>> prepareDevMode(DevOptions devOptions, DebugOptions debugOptions,
List<String> params) {
ArrayDeque<String> args = new ArrayDeque<>();
List<String> jvmArgs = new ArrayList<>();
setGradleProperties(args, false);
if (devOptions.clean) {
args.add("clean");
}
args.add("quarkusDev");
if (devOptions.skipTests()) {
setSkipTests(args);
}
debugOptions.addDebugArguments(args, jvmArgs);
propertiesOptions.flattenJvmArgs(jvmArgs, args);
paramsToQuarkusArgs(params, args);
try {
Path outputFile = Files.createTempFile("quarkus-dev", ".txt");
args.add("-Dio.quarkus.devmode-args=" + outputFile.toAbsolutePath().toString());
BuildCommandArgs buildCommandArgs = prependExecutable(args);
return Arrays.asList(new Supplier<BuildCommandArgs>() {
@Override
public BuildCommandArgs get() {
return buildCommandArgs;
}
}, new Supplier<BuildCommandArgs>() {
@Override
public BuildCommandArgs get() {
try {
List<String> lines = Files.readAllLines(outputFile).stream().filter(s -> !s.isBlank())
.collect(Collectors.toList());
BuildCommandArgs cmd = new BuildCommandArgs();
cmd.arguments = lines.toArray(new String[0]);
cmd.targetDirectory = buildCommandArgs.targetDirectory;
return cmd;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
});
} catch (IOException e) {
throw new RuntimeException(e);
}
}
void setSkipTests(ArrayDeque<String> args) {
args.add("-x");
args.add("test");
}
void verifyBuildFile() {
for (String buildFileName : buildTool.getBuildFiles()) {
File buildFile = projectRoot.resolve(buildFileName).toFile();
if (buildFile.exists()) {
return;
}
}
throw new IllegalStateException("Was not able to find a build file in: " + projectRoot
+ " based on the following list: " + String.join(",", buildTool.getBuildFiles()));
}
}
|
Shall we create an issue? Error unions are also effectively immutable. IMO we need to check for any differences and sync this behaviour with the compile-time behaviour.
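A hedged sketch of what the runtime check could look like (isEffectivelyImmutableUnion is an assumed helper name, not existing jBallerina API): before building an immutable intersection for a union, verify whether every member is already readonly, mirroring the compile-time treatment of error unions.

// Assumed helper: a union is effectively immutable if all of its members are readonly.
private static boolean isEffectivelyImmutableUnion(UnionType unionType) {
    for (Type memberType : unionType.getMemberTypes()) {
        if (!memberType.isReadOnly()) {
            return false;
        }
    }
    return true;
}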
|
private static Type getImmutableType(Type type, Set<Type> unresolvedTypes) {
if (TypeChecker.isInherentlyImmutableType(type) || (SymbolFlags.isFlagOn(type.getFlags(),
SymbolFlags.READONLY))) {
return type;
}
if (!unresolvedTypes.add(type)) {
return type;
}
return setImmutableIntersectionType(type, unresolvedTypes);
}
|
if (TypeChecker.isInherentlyImmutableType(type) || (SymbolFlags.isFlagOn(type.getFlags(),
|
private static Type getImmutableType(Type type, Set<Type> unresolvedTypes) {
if (TypeChecker.isInherentlyImmutableType(type) || (SymbolFlags.isFlagOn(type.getFlags(),
SymbolFlags.READONLY))) {
return type;
}
if (!unresolvedTypes.add(type)) {
return type;
}
return setImmutableIntersectionType(type, unresolvedTypes);
}
|
class ReadOnlyUtils {
    /**
     * Handles an update to a value that is invalid because the value is immutable.
     *
     * @param moduleName the name of the langlib module whose value the invalid update was attempted on
     */
static void handleInvalidUpdate(String moduleName) {
throw new BLangFreezeException(getModulePrefixedReason(moduleName, INVALID_UPDATE_ERROR_IDENTIFIER).getValue(),
BLangExceptionHelper.getErrorMessage(INVALID_READONLY_VALUE_UPDATE).getValue());
}
public static Type getReadOnlyType(Type type) {
if (type.isReadOnly()) {
return type;
}
if (!TypeChecker.isSelectivelyImmutableType(type, new HashSet<>())) {
throw new IllegalArgumentException(type.getName() + " cannot be a readonly type.");
}
return setImmutableTypeAndGetEffectiveType(type);
}
public static Type getReadOnlyType(Type type, Set<Type> unresolvedTypes) {
if (type.isReadOnly()) {
return type;
}
if (!TypeChecker.isSelectivelyImmutableType(type, new HashSet<>())) {
throw new IllegalArgumentException(type.getName() + " cannot be a readonly type.");
}
return setImmutableTypeAndGetEffectiveType(type, unresolvedTypes);
}
public static Type setImmutableTypeAndGetEffectiveType(Type type) {
Type immutableType = getAvailableImmutableType(type);
if (immutableType != null) {
return immutableType;
}
return setImmutableIntersectionType(type, new HashSet<>()).getEffectiveType();
}
public static Type setImmutableTypeAndGetEffectiveType(Type type, Set<Type> unresolvedTypes) {
Type immutableType = getAvailableImmutableType(type);
if (immutableType != null) {
return immutableType;
}
return setImmutableIntersectionType(type, unresolvedTypes).getEffectiveType();
}
private static Type getAvailableImmutableType(Type type) {
if (TypeChecker.isInherentlyImmutableType(type)) {
return type;
}
if (type.getTag() == TypeTags.INTERSECTION_TAG && type.isReadOnly()) {
return ((BIntersectionType) type).getEffectiveType();
}
IntersectionType immutableType = ((SelectivelyImmutableReferenceType) type).getImmutableType();
if (immutableType != null) {
return immutableType.getEffectiveType();
}
return null;
}
private static BIntersectionType setImmutableIntersectionType(Type type, Set<Type> unresolvedTypes) {
Type immutableType = ((SelectivelyImmutableReferenceType) type).getImmutableType();
if (immutableType != null) {
return (BIntersectionType) immutableType;
}
switch (type.getTag()) {
case TypeTags.XML_COMMENT_TAG:
BXmlType readonlyCommentType = new BXmlType(TypeConstants.READONLY_XML_COMMENT,
new Module(BALLERINA_BUILTIN_PKG_PREFIX, XML_LANG_LIB,
null),
TypeTags.XML_COMMENT_TAG, true);
return createAndSetImmutableIntersectionType(type, readonlyCommentType);
case TypeTags.XML_ELEMENT_TAG:
BXmlType readonlyElementType = new BXmlType(TypeConstants.READONLY_XML_ELEMENT,
new Module(BALLERINA_BUILTIN_PKG_PREFIX, XML_LANG_LIB,
null),
TypeTags.XML_ELEMENT_TAG, true);
return createAndSetImmutableIntersectionType(type, readonlyElementType);
case TypeTags.XML_PI_TAG:
BXmlType readonlyPI = new BXmlType(TypeConstants.READONLY_XML_PI,
new Module(BALLERINA_BUILTIN_PKG_PREFIX, XML_LANG_LIB, null),
TypeTags.XML_PI_TAG, true);
return createAndSetImmutableIntersectionType(type, readonlyPI);
case TypeTags.XML_TAG:
BXmlType origXmlType = (BXmlType) type;
BXmlType immutableXmlType = new BXmlType(READONLY_XML_TNAME, origXmlType.getPackage(),
origXmlType.getTag(), true);
immutableXmlType.constraint = getImmutableType(origXmlType.constraint, unresolvedTypes);
return createAndSetImmutableIntersectionType(origXmlType, immutableXmlType);
case TypeTags.ARRAY_TAG:
BArrayType origArrayType = (BArrayType) type;
BArrayType immutableArrayType = new BArrayType(getImmutableType(origArrayType.getElementType(),
unresolvedTypes),
origArrayType.getSize(), true);
return createAndSetImmutableIntersectionType(origArrayType, immutableArrayType);
case TypeTags.TUPLE_TAG:
BTupleType origTupleType = (BTupleType) type;
List<Type> origTupleMemTypes = origTupleType.getTupleTypes();
List<Type> immutableMemTypes = new ArrayList<>(origTupleMemTypes.size());
for (Type origTupleMemType : origTupleMemTypes) {
immutableMemTypes.add(getImmutableType(origTupleMemType, unresolvedTypes));
}
Type origTupleRestType = origTupleType.getRestType();
BTupleType immutableTupleType =
new BTupleType(immutableMemTypes, origTupleRestType == null ? null :
getImmutableType(origTupleRestType, unresolvedTypes), origTupleType.getTypeFlags(),
origTupleType.isCyclic, true);
return createAndSetImmutableIntersectionType(origTupleType, immutableTupleType);
case TypeTags.MAP_TAG:
BMapType origMapType = (BMapType) type;
BMapType immutableMapType = new BMapType(getImmutableType(origMapType.getConstrainedType(),
unresolvedTypes), true);
return createAndSetImmutableIntersectionType(origMapType, immutableMapType);
case TypeTags.RECORD_TYPE_TAG:
BRecordType origRecordType = (BRecordType) type;
Map<String, Field> originalFields = origRecordType.getFields();
Map<String, Field> fields = new HashMap<>(originalFields.size());
for (Map.Entry<String, Field> entry : originalFields.entrySet()) {
Field originalField = entry.getValue();
fields.put(entry.getKey(),
new BField(getImmutableType(originalField.getFieldType(), unresolvedTypes),
originalField.getFieldName(), originalField.getFlags()));
}
BRecordType immutableRecordType = new BRecordType(origRecordType.getName().concat(" & readonly"),
origRecordType.getPackage(),
origRecordType.flags |= SymbolFlags.READONLY, fields,
null, origRecordType.sealed,
origRecordType.typeFlags);
BIntersectionType intersectionType = createAndSetImmutableIntersectionType(origRecordType,
immutableRecordType);
Type origRecordRestFieldType = origRecordType.restFieldType;
if (origRecordRestFieldType != null) {
immutableRecordType.restFieldType = getImmutableType(origRecordRestFieldType, unresolvedTypes);
}
return intersectionType;
case TypeTags.TABLE_TAG:
BTableType origTableType = (BTableType) type;
BTableType immutableTableType;
Type origKeyType = origTableType.getKeyType();
if (origKeyType != null) {
immutableTableType = new BTableType(getImmutableType(origTableType.getConstrainedType(),
unresolvedTypes),
getImmutableType(origKeyType, unresolvedTypes), true);
} else {
immutableTableType = new BTableType(getImmutableType(origTableType.getConstrainedType(),
unresolvedTypes),
origTableType.getFieldNames(), true);
}
return createAndSetImmutableIntersectionType(origTableType, immutableTableType);
case TypeTags.OBJECT_TYPE_TAG:
BObjectType origObjectType = (BObjectType) type;
Map<String, Field> originalObjectFields = origObjectType.getFields();
Map<String, Field> immutableObjectFields = new HashMap<>(originalObjectFields.size());
BObjectType immutableObjectType = new BObjectType(origObjectType.getName().concat(" & readonly"),
origObjectType.getPackage(),
origObjectType.flags |= SymbolFlags.READONLY);
immutableObjectType.setFields(immutableObjectFields);
immutableObjectType.generatedInitializer = origObjectType.generatedInitializer;
immutableObjectType.initializer = origObjectType.initializer;
immutableObjectType.setMethods(origObjectType.getMethods());
BIntersectionType objectIntersectionType = createAndSetImmutableIntersectionType(origObjectType,
immutableObjectType);
for (Map.Entry<String, Field> entry : originalObjectFields.entrySet()) {
Field originalField = entry.getValue();
immutableObjectFields.put(entry.getKey(), new BField(getImmutableType(originalField.getFieldType(),
unresolvedTypes),
originalField.getFieldName(), originalField.getFlags()));
}
return objectIntersectionType;
case TypeTags.ANY_TAG:
case TypeTags.ANYDATA_TAG:
case TypeTags.JSON_TAG:
return (BIntersectionType) type.getImmutableType();
default:
BUnionType origUnionType = (BUnionType) type;
Type resultantImmutableType;
List<Type> readOnlyMemTypes = new ArrayList<>();
for (Type memberType : origUnionType.getMemberTypes()) {
if (TypeChecker.isInherentlyImmutableType(memberType)) {
readOnlyMemTypes.add(memberType);
continue;
}
if (!TypeChecker.isSelectivelyImmutableType(memberType, unresolvedTypes)) {
continue;
}
readOnlyMemTypes.add(getImmutableType(memberType, unresolvedTypes));
}
if (readOnlyMemTypes.size() == 1) {
resultantImmutableType = readOnlyMemTypes.iterator().next();
} else if (!unresolvedTypes.add(type)) {
resultantImmutableType = origUnionType;
} else {
resultantImmutableType = new BUnionType(readOnlyMemTypes, true, origUnionType.isCyclic);
}
return createAndSetImmutableIntersectionType(origUnionType, resultantImmutableType);
}
}
private static BIntersectionType createAndSetImmutableIntersectionType(Type originalType, Type effectiveType) {
return createAndSetImmutableIntersectionType(originalType.getPackage(), originalType, effectiveType);
}
private static BIntersectionType createAndSetImmutableIntersectionType(Module pkg, Type originalType,
Type effectiveType) {
int typeFlags = 0;
if (effectiveType.isAnydata()) {
typeFlags |= TypeFlags.ANYDATA;
}
if (effectiveType.isPureType()) {
typeFlags |= TypeFlags.PURETYPE;
}
if (effectiveType.isNilable()) {
typeFlags |= TypeFlags.NILABLE;
}
BIntersectionType intersectionType = new BIntersectionType(pkg,
new Type[]{originalType,
PredefinedTypes.TYPE_READONLY},
(IntersectableReferenceType) effectiveType,
typeFlags, true);
originalType.setImmutableType(intersectionType);
return intersectionType;
}
private ReadOnlyUtils() {
}
}
|
class ReadOnlyUtils {
    /**
     * Handles an update to a value that is invalid because the value is immutable.
     *
     * @param moduleName the name of the langlib module whose value the invalid update was attempted on
     */
static void handleInvalidUpdate(String moduleName) {
throw new BLangFreezeException(getModulePrefixedReason(moduleName, INVALID_UPDATE_ERROR_IDENTIFIER).getValue(),
BLangExceptionHelper.getErrorMessage(INVALID_READONLY_VALUE_UPDATE).getValue());
}
public static Type getReadOnlyType(Type type) {
if (type.isReadOnly()) {
return type;
}
if (!TypeChecker.isSelectivelyImmutableType(type, new HashSet<>())) {
throw new IllegalArgumentException(type.getName() + " cannot be a readonly type.");
}
return setImmutableTypeAndGetEffectiveType(type);
}
public static Type getReadOnlyType(Type type, Set<Type> unresolvedTypes) {
if (type.isReadOnly()) {
return type;
}
if (!TypeChecker.isSelectivelyImmutableType(type, new HashSet<>())) {
throw new IllegalArgumentException(type.getName() + " cannot be a readonly type.");
}
return setImmutableTypeAndGetEffectiveType(type, unresolvedTypes);
}
public static Type setImmutableTypeAndGetEffectiveType(Type type) {
Type immutableType = getAvailableImmutableType(type);
if (immutableType != null) {
return immutableType;
}
return setImmutableIntersectionType(type, new HashSet<>()).getEffectiveType();
}
public static Type setImmutableTypeAndGetEffectiveType(Type type, Set<Type> unresolvedTypes) {
Type immutableType = getAvailableImmutableType(type);
if (immutableType != null) {
return immutableType;
}
return setImmutableIntersectionType(type, unresolvedTypes).getEffectiveType();
}
private static Type getAvailableImmutableType(Type type) {
if (TypeChecker.isInherentlyImmutableType(type)) {
return type;
}
if (type.getTag() == TypeTags.INTERSECTION_TAG && type.isReadOnly()) {
return ((BIntersectionType) type).getEffectiveType();
}
IntersectionType immutableType = ((SelectivelyImmutableReferenceType) type).getImmutableType();
if (immutableType != null) {
return immutableType.getEffectiveType();
}
return null;
}
private static BIntersectionType setImmutableIntersectionType(Type type, Set<Type> unresolvedTypes) {
Type immutableType = ((SelectivelyImmutableReferenceType) type).getImmutableType();
if (immutableType != null) {
return (BIntersectionType) immutableType;
}
switch (type.getTag()) {
case TypeTags.XML_COMMENT_TAG:
BXmlType readonlyCommentType = new BXmlType(TypeConstants.READONLY_XML_COMMENT,
new Module(BALLERINA_BUILTIN_PKG_PREFIX, XML_LANG_LIB,
null),
TypeTags.XML_COMMENT_TAG, true);
return createAndSetImmutableIntersectionType(type, readonlyCommentType);
case TypeTags.XML_ELEMENT_TAG:
BXmlType readonlyElementType = new BXmlType(TypeConstants.READONLY_XML_ELEMENT,
new Module(BALLERINA_BUILTIN_PKG_PREFIX, XML_LANG_LIB,
null),
TypeTags.XML_ELEMENT_TAG, true);
return createAndSetImmutableIntersectionType(type, readonlyElementType);
case TypeTags.XML_PI_TAG:
BXmlType readonlyPI = new BXmlType(TypeConstants.READONLY_XML_PI,
new Module(BALLERINA_BUILTIN_PKG_PREFIX, XML_LANG_LIB, null),
TypeTags.XML_PI_TAG, true);
return createAndSetImmutableIntersectionType(type, readonlyPI);
case TypeTags.XML_TAG:
BXmlType origXmlType = (BXmlType) type;
BXmlType immutableXmlType = new BXmlType(READONLY_XML_TNAME, origXmlType.getPackage(),
origXmlType.getTag(), true);
immutableXmlType.constraint = getImmutableType(origXmlType.constraint, unresolvedTypes);
return createAndSetImmutableIntersectionType(origXmlType, immutableXmlType);
case TypeTags.ARRAY_TAG:
BArrayType origArrayType = (BArrayType) type;
BArrayType immutableArrayType = new BArrayType(getImmutableType(origArrayType.getElementType(),
unresolvedTypes),
origArrayType.getSize(), true);
return createAndSetImmutableIntersectionType(origArrayType, immutableArrayType);
case TypeTags.TUPLE_TAG:
BTupleType origTupleType = (BTupleType) type;
List<Type> origTupleMemTypes = origTupleType.getTupleTypes();
List<Type> immutableMemTypes = new ArrayList<>(origTupleMemTypes.size());
for (Type origTupleMemType : origTupleMemTypes) {
immutableMemTypes.add(getImmutableType(origTupleMemType, unresolvedTypes));
}
Type origTupleRestType = origTupleType.getRestType();
BTupleType immutableTupleType =
new BTupleType(immutableMemTypes, origTupleRestType == null ? null :
getImmutableType(origTupleRestType, unresolvedTypes), origTupleType.getTypeFlags(),
origTupleType.isCyclic, true);
return createAndSetImmutableIntersectionType(origTupleType, immutableTupleType);
case TypeTags.MAP_TAG:
BMapType origMapType = (BMapType) type;
BMapType immutableMapType = new BMapType(getImmutableType(origMapType.getConstrainedType(),
unresolvedTypes), true);
return createAndSetImmutableIntersectionType(origMapType, immutableMapType);
case TypeTags.RECORD_TYPE_TAG:
BRecordType origRecordType = (BRecordType) type;
Map<String, Field> originalFields = origRecordType.getFields();
Map<String, Field> fields = new HashMap<>(originalFields.size());
for (Map.Entry<String, Field> entry : originalFields.entrySet()) {
Field originalField = entry.getValue();
fields.put(entry.getKey(),
new BField(getImmutableType(originalField.getFieldType(), unresolvedTypes),
originalField.getFieldName(), originalField.getFlags()));
}
BRecordType immutableRecordType = new BRecordType(origRecordType.getName().concat(" & readonly"),
origRecordType.getPackage(),
origRecordType.flags |= SymbolFlags.READONLY, fields,
null, origRecordType.sealed,
origRecordType.typeFlags);
BIntersectionType intersectionType = createAndSetImmutableIntersectionType(origRecordType,
immutableRecordType);
Type origRecordRestFieldType = origRecordType.restFieldType;
if (origRecordRestFieldType != null) {
immutableRecordType.restFieldType = getImmutableType(origRecordRestFieldType, unresolvedTypes);
}
return intersectionType;
case TypeTags.TABLE_TAG:
BTableType origTableType = (BTableType) type;
BTableType immutableTableType;
Type origKeyType = origTableType.getKeyType();
if (origKeyType != null) {
immutableTableType = new BTableType(getImmutableType(origTableType.getConstrainedType(),
unresolvedTypes),
getImmutableType(origKeyType, unresolvedTypes), true);
} else {
immutableTableType = new BTableType(getImmutableType(origTableType.getConstrainedType(),
unresolvedTypes),
origTableType.getFieldNames(), true);
}
return createAndSetImmutableIntersectionType(origTableType, immutableTableType);
case TypeTags.OBJECT_TYPE_TAG:
BObjectType origObjectType = (BObjectType) type;
Map<String, Field> originalObjectFields = origObjectType.getFields();
Map<String, Field> immutableObjectFields = new HashMap<>(originalObjectFields.size());
BObjectType immutableObjectType = new BObjectType(origObjectType.getName().concat(" & readonly"),
origObjectType.getPackage(),
origObjectType.flags |= SymbolFlags.READONLY);
immutableObjectType.setFields(immutableObjectFields);
immutableObjectType.generatedInitializer = origObjectType.generatedInitializer;
immutableObjectType.initializer = origObjectType.initializer;
immutableObjectType.setMethods(origObjectType.getMethods());
BIntersectionType objectIntersectionType = createAndSetImmutableIntersectionType(origObjectType,
immutableObjectType);
for (Map.Entry<String, Field> entry : originalObjectFields.entrySet()) {
Field originalField = entry.getValue();
immutableObjectFields.put(entry.getKey(), new BField(getImmutableType(originalField.getFieldType(),
unresolvedTypes),
originalField.getFieldName(), originalField.getFlags()));
}
return objectIntersectionType;
case TypeTags.ANY_TAG:
case TypeTags.ANYDATA_TAG:
case TypeTags.JSON_TAG:
return (BIntersectionType) type.getImmutableType();
default:
BUnionType origUnionType = (BUnionType) type;
Type resultantImmutableType;
List<Type> readOnlyMemTypes = new ArrayList<>();
for (Type memberType : origUnionType.getMemberTypes()) {
if (TypeChecker.isInherentlyImmutableType(memberType)) {
readOnlyMemTypes.add(memberType);
continue;
}
if (!TypeChecker.isSelectivelyImmutableType(memberType, unresolvedTypes)) {
continue;
}
readOnlyMemTypes.add(getImmutableType(memberType, unresolvedTypes));
}
if (readOnlyMemTypes.size() == 1) {
resultantImmutableType = readOnlyMemTypes.iterator().next();
} else if (!unresolvedTypes.add(type)) {
resultantImmutableType = origUnionType;
} else {
resultantImmutableType = new BUnionType(readOnlyMemTypes, true, origUnionType.isCyclic);
}
return createAndSetImmutableIntersectionType(origUnionType, resultantImmutableType);
}
}
private static BIntersectionType createAndSetImmutableIntersectionType(Type originalType, Type effectiveType) {
return createAndSetImmutableIntersectionType(originalType.getPackage(), originalType, effectiveType);
}
private static BIntersectionType createAndSetImmutableIntersectionType(Module pkg, Type originalType,
Type effectiveType) {
int typeFlags = 0;
if (effectiveType.isAnydata()) {
typeFlags |= TypeFlags.ANYDATA;
}
if (effectiveType.isPureType()) {
typeFlags |= TypeFlags.PURETYPE;
}
if (effectiveType.isNilable()) {
typeFlags |= TypeFlags.NILABLE;
}
BIntersectionType intersectionType = new BIntersectionType(pkg,
new Type[]{originalType,
PredefinedTypes.TYPE_READONLY},
(IntersectableReferenceType) effectiveType,
typeFlags, true);
originalType.setImmutableType(intersectionType);
return intersectionType;
}
private ReadOnlyUtils() {
}
}
|
Does this mean we run the same tests for the `HEAP` backend twice (twice for native, and twice for canonical)? Shall we skip one execution?
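If one execution should indeed be skipped, a minimal sketch of de-duplicating the HEAP configurations might look like this. The generator below is hypothetical (not the actual ITCase parameter code): since `incremental` is ignored for HEAP, it fixes the flag to false instead of iterating both values.
```java
import java.util.ArrayList;
import java.util.List;

final class HeapParamsSketch {
    static List<String> heapConfigs() {
        List<String> configs = new ArrayList<>();
        for (boolean changelog : new boolean[] {false, true}) {
            // Only one incremental value per changelog setting, so each
            // HEAP test instance runs once instead of twice.
            configs.add("HEAP, incremental: false, changelog: " + changelog);
        }
        return configs;
    }
}
```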
|
private static StateBackendConfig heap(boolean incremental, boolean changelogEnabled) {
return new StateBackendConfig(changelogEnabled, incremental /* ignored for now */) {
@Override
public String getName() {
return "HEAP";
}
@Override
public Configuration getConfiguration() {
Configuration stateBackendConfig = super.getConfiguration();
stateBackendConfig.set(
CheckpointingOptions.FS_SMALL_FILE_THRESHOLD, MemorySize.ZERO);
return stateBackendConfig;
}
@Override
protected String getConfigName() {
return "filesystem";
}
@Override
public boolean isIncremental() {
return false;
}
};
}
|
return new StateBackendConfig(changelogEnabled, incremental /* ignored for now */) {
|
private static StateBackendConfig heap(boolean incremental, boolean changelogEnabled) {
return new StateBackendConfig(changelogEnabled, incremental /* ignored for now */) {
@Override
public String getName() {
return "HEAP";
}
@Override
public Configuration getConfiguration() {
Configuration stateBackendConfig = super.getConfiguration();
stateBackendConfig.set(
CheckpointingOptions.FS_SMALL_FILE_THRESHOLD, MemorySize.ZERO);
return stateBackendConfig;
}
@Override
protected String getConfigName() {
return "filesystem";
}
@Override
public boolean isIncremental() {
return false;
}
};
}
|
abstract static class StateBackendConfig {
protected final boolean changelogEnabled;
protected final boolean incremental;
protected StateBackendConfig(boolean changelogEnabled, boolean incremental) {
this.changelogEnabled = changelogEnabled;
this.incremental = incremental;
}
public abstract String getName();
public Configuration getConfiguration() {
Configuration stateBackendConfig = new Configuration();
stateBackendConfig.setString(StateBackendOptions.STATE_BACKEND, getConfigName());
stateBackendConfig.set(CheckpointingOptions.INCREMENTAL_CHECKPOINTS, incremental);
stateBackendConfig.set(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG, changelogEnabled);
return stateBackendConfig;
}
public int getCheckpointsBeforeSavepoint() {
return 0;
}
protected abstract String getConfigName();
@Override
public final String toString() {
return String.format(
"%s, incremental: %b, changelog: %b", getName(), incremental, changelogEnabled);
}
private static final List<BiFunction<Boolean, Boolean, StateBackendConfig>> builders =
asList(SavepointFormatITCase::getRocksdb, SavepointFormatITCase::heap);
public abstract boolean isIncremental();
private boolean isChangelogEnabled() {
return changelogEnabled;
}
}
|
abstract static class StateBackendConfig {
protected final boolean changelogEnabled;
protected final boolean incremental;
protected StateBackendConfig(boolean changelogEnabled, boolean incremental) {
this.changelogEnabled = changelogEnabled;
this.incremental = incremental;
}
public abstract String getName();
public Configuration getConfiguration() {
Configuration stateBackendConfig = new Configuration();
stateBackendConfig.setString(StateBackendOptions.STATE_BACKEND, getConfigName());
stateBackendConfig.set(CheckpointingOptions.INCREMENTAL_CHECKPOINTS, incremental);
stateBackendConfig.set(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG, changelogEnabled);
return stateBackendConfig;
}
public int getCheckpointsBeforeSavepoint() {
return 0;
}
protected abstract String getConfigName();
@Override
public final String toString() {
return String.format(
"%s, incremental: %b, changelog: %b", getName(), incremental, changelogEnabled);
}
private static final List<BiFunction<Boolean, Boolean, StateBackendConfig>> builders =
asList(SavepointFormatITCase::getRocksdb, SavepointFormatITCase::heap);
public abstract boolean isIncremental();
private boolean isChangelogEnabled() {
return changelogEnabled;
}
}
|
I think in this case we should call setError() on the RPC request and return it?
|
public void run() {
try {
byte compressionType = rpcRequest.parameters().get(0).asInt8();
if (compressionType != 0) {
rpcRequest.setError(0, "Invalid compression type: " + compressionType);
rpcRequest.returnRequest();
return;
}
int uncompressedSize = rpcRequest.parameters().get(1).asInt32();
byte[] logRequestPayload = rpcRequest.parameters().get(2).asData();
if (uncompressedSize != logRequestPayload.length) {
rpcRequest.setError(1, String.format("Invalid uncompressed size: got %d while data is of size %d ", uncompressedSize, logRequestPayload.length));
rpcRequest.returnRequest();
return;
}
logDispatcher.handle(ProtobufSerialization.fromLogRequest(logRequestPayload));
rpcRequest.returnValues().add(new Int8Value((byte)0));
byte[] responsePayload = ProtobufSerialization.toLogResponse();
rpcRequest.returnValues().add(new Int32Value(responsePayload.length));
rpcRequest.returnValues().add(new DataValue(responsePayload));
rpcRequest.returnRequest();
} catch (Exception e) {
log.log(Level.WARNING, e, () -> "Failed to handle log request: " + e.getMessage());
}
}
|
log.log(Level.WARNING, e, () -> "Failed to handle log request: " + e.getMessage());
|
public void run() {
try {
byte compressionType = rpcRequest.parameters().get(0).asInt8();
if (compressionType != 0) {
rpcRequest.setError(ErrorCode.METHOD_FAILED, "Invalid compression type: " + compressionType);
rpcRequest.returnRequest();
return;
}
int uncompressedSize = rpcRequest.parameters().get(1).asInt32();
byte[] logRequestPayload = rpcRequest.parameters().get(2).asData();
if (uncompressedSize != logRequestPayload.length) {
rpcRequest.setError(ErrorCode.METHOD_FAILED, String.format("Invalid uncompressed size: got %d while data is of size %d ", uncompressedSize, logRequestPayload.length));
rpcRequest.returnRequest();
return;
}
logDispatcher.handle(ProtobufSerialization.fromLogRequest(logRequestPayload));
rpcRequest.returnValues().add(new Int8Value((byte)0));
byte[] responsePayload = ProtobufSerialization.toLogResponse();
rpcRequest.returnValues().add(new Int32Value(responsePayload.length));
rpcRequest.returnValues().add(new DataValue(responsePayload));
rpcRequest.returnRequest();
} catch (Exception e) {
String errorMessage = "Failed to handle log request: " + e.getMessage();
log.log(Level.WARNING, e, () -> errorMessage);
rpcRequest.setError(ErrorCode.METHOD_FAILED, errorMessage);
rpcRequest.returnRequest();
}
}
|
class ArchiveLogMessagesTask implements Runnable {
final Request rpcRequest;
final LogDispatcher logDispatcher;
ArchiveLogMessagesTask(Request rpcRequest, LogDispatcher logDispatcher) {
this.rpcRequest = rpcRequest;
this.logDispatcher = logDispatcher;
}
@Override
}
|
class ArchiveLogMessagesTask implements Runnable {
final Request rpcRequest;
final LogDispatcher logDispatcher;
ArchiveLogMessagesTask(Request rpcRequest, LogDispatcher logDispatcher) {
this.rpcRequest = rpcRequest;
this.logDispatcher = logDispatcher;
}
@Override
}
|
I will try to add a new `MultipleParameterTool extends ParameterTool` instead. It is an enhanced class with support for multiple parameter values, and it will be marked as `PublicEvolving`. It should be compatible with `ParameterTool`; all the tests in `ParameterToolTest` should pass.
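A hedged usage sketch of the proposed class (the name and methods come from this comment and the surrounding diff, not from a verified Flink release): repeated keys collect into a Collection, while single-value access stays ParameterTool-compatible.
```java
import java.util.Collection;

class MultiParamUsageSketch {
    static void demo() {
        // Hypothetical API: MultipleParameterTool is the class proposed above;
        // getMultiParameter(String) mirrors the method in the diff below.
        String[] args = {"--host", "node-1", "--host", "node-2", "--port", "8080"};
        MultipleParameterTool params = MultipleParameterTool.fromArgs(args);

        Collection<String> hosts = params.getMultiParameter("host"); // [node-1, node-2]
        int port = params.getInt("port");                            // 8080, single-value access
    }
}
```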
|
public static ParameterTool fromArgs(String[] args) {
final Map<String, Collection<String>> map = new HashMap<>(args.length / 2);
int i = 0;
while (i < args.length) {
final String key;
if (args[i].startsWith("--")) {
key = args[i].substring(2);
} else if (args[i].startsWith("-")) {
key = args[i].substring(1);
} else {
throw new IllegalArgumentException(
String.format("Error parsing arguments '%s' on '%s'. Please prefix keys with -- or -.",
Arrays.toString(args), args[i]));
}
if (key.isEmpty()) {
throw new IllegalArgumentException(
"The input " + Arrays.toString(args) + " contains an empty argument");
}
i += 1;
map.putIfAbsent(key, new ArrayList<>());
if (i >= args.length) {
map.get(key).add(NO_VALUE_KEY);
} else if (NumberUtils.isNumber(args[i])) {
map.get(key).add(args[i]);
i += 1;
} else if (args[i].startsWith("--") || args[i].startsWith("-")) {
map.get(key).add(NO_VALUE_KEY);
} else {
map.get(key).add(args[i]);
i += 1;
}
}
return fromMultiMap(map);
}
|
map.putIfAbsent(key, new ArrayList<>());
|
public static ParameterTool fromArgs(String[] args) {
final Map<String, String> map = new HashMap<>(args.length / 2);
int i = 0;
while (i < args.length) {
final String key = Utils.getKeyFromArgs(args, i);
if (key.isEmpty()) {
throw new IllegalArgumentException(
"The input " + Arrays.toString(args) + " contains an empty argument");
}
i += 1;
if (i >= args.length) {
map.put(key, NO_VALUE_KEY);
} else if (NumberUtils.isNumber(args[i])) {
map.put(key, args[i]);
i += 1;
} else if (args[i].startsWith("--") || args[i].startsWith("-")) {
map.put(key, NO_VALUE_KEY);
} else {
map.put(key, args[i]);
i += 1;
}
}
return fromMap(map);
}
|
class ParameterTool extends ExecutionConfig.GlobalJobParameters implements Serializable, Cloneable {
private static final long serialVersionUID = 1L;
protected static final String NO_VALUE_KEY = "__NO_VALUE_KEY";
protected static final String DEFAULT_UNDEFINED = "<undefined>";
/**
* Returns {@link ParameterTool} for the given arguments. The arguments are keys followed by values.
* Keys have to start with '-' or '--'
*
* <p><strong>Example arguments:</strong>
* --key1 value1 --key2 value2 -key3 value3
*
* @param args Input array arguments
* @return A {@link ParameterTool}
*/
/**
* Returns {@link ParameterTool} for the given {@link Properties} file.
*
* @param path Path to the properties file
* @return A {@link ParameterTool}
* @throws IOException If the file does not exist
* @see Properties
*/
public static ParameterTool fromPropertiesFile(String path) throws IOException {
File propertiesFile = new File(path);
return fromPropertiesFile(propertiesFile);
}
/**
* Returns {@link ParameterTool} for the given {@link Properties} file.
*
* @param file File object to the properties file
* @return A {@link ParameterTool}
* @throws IOException If the file does not exist
* @see Properties
*/
public static ParameterTool fromPropertiesFile(File file) throws IOException {
if (!file.exists()) {
throw new FileNotFoundException("Properties file " + file.getAbsolutePath() + " does not exist");
}
try (FileInputStream fis = new FileInputStream(file)) {
return fromPropertiesFile(fis);
}
}
/**
* Returns {@link ParameterTool} for the given InputStream from {@link Properties} file.
*
* @param inputStream InputStream from the properties file
* @return A {@link ParameterTool}
* @throws IOException If the file does not exist
* @see Properties
*/
public static ParameterTool fromPropertiesFile(InputStream inputStream) throws IOException {
Properties props = new Properties();
props.load(inputStream);
return fromMap((Map) props);
}
/**
* Returns {@link ParameterTool} for the given map.
*
* @param map A map of arguments. Both Key and Value have to be Strings
* @return A {@link ParameterTool}
*/
public static ParameterTool fromMap(Map<String, String> map) {
Preconditions.checkNotNull(map, "Unable to initialize from empty map");
Map<String, Collection<String>> multiMap = new HashMap<>();
map.forEach((key, value) -> {
if (value == null) {
value = NO_VALUE_KEY;
}
multiMap.put(key, Collections.singletonList(value));
});
return new ParameterTool(multiMap);
}
/**
* Returns {@link ParameterTool} for the given multi map.
*
* @param multiMap A map of arguments. Key is String and value is a Collection.
* @return A {@link ParameterTool}
*/
public static ParameterTool fromMultiMap(Map<String, Collection<String>> multiMap) {
Preconditions.checkNotNull(multiMap, "Unable to initialize from empty map");
return new ParameterTool(multiMap);
}
/**
* Returns {@link ParameterTool} from the system properties.
     * Example of how to pass system properties:
* -Dkey1=value1 -Dkey2=value2
*
* @return A {@link ParameterTool}
*/
public static ParameterTool fromSystemProperties() {
return fromMap((Map) System.getProperties());
}
protected final Map<String, Collection<String>> data;
protected transient Map<String, String> defaultData;
protected transient Set<String> unrequestedParameters;
private ParameterTool(Map<String, Collection<String>> data) {
this.data = Collections.unmodifiableMap(new HashMap<>(data));
this.defaultData = new ConcurrentHashMap<>(data.size());
this.unrequestedParameters = Collections.newSetFromMap(new ConcurrentHashMap<>(data.size()));
unrequestedParameters.addAll(data.keySet());
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ParameterTool that = (ParameterTool) o;
return Objects.equals(data, that.data) &&
Objects.equals(defaultData, that.defaultData) &&
Objects.equals(unrequestedParameters, that.unrequestedParameters);
}
@Override
public int hashCode() {
return Objects.hash(data, defaultData, unrequestedParameters);
}
    /**
     * Returns the set of parameter names which have not been requested with
     * {@link #has(String)} or one of the {@code get} methods. Access to the
     * map returned by {@link #toMap()} is not tracked.
     */
@PublicEvolving
public Set<String> getUnrequestedParameters() {
return Collections.unmodifiableSet(unrequestedParameters);
}
/**
* Returns number of parameters in {@link ParameterTool}.
*/
public int getNumberOfParameters() {
return data.size();
}
/**
* Returns the String value for the given key. The value should only have one item.
     * Use {@link #getMultiParameter(String)} instead if multiple values are expected.
* If the key does not exist it will return null.
*/
public String get(String key) {
addToDefaults(key, null);
unrequestedParameters.remove(key);
if (!data.containsKey(key)) {
return null;
}
Preconditions.checkState(data.get(key).size() == 1,
"Key %s should has only one value, use getMultiParameter(String) instead if want to get multiple values.", key);
return (String) data.get(key).toArray()[0];
}
/**
* Returns the String value for the given key.
* If the key does not exist it will throw a {@link RuntimeException}.
*/
public String getRequired(String key) {
addToDefaults(key, null);
String value = get(key);
if (value == null) {
throw new RuntimeException("No data for required key '" + key + "'");
}
return value;
}
/**
* Returns the String value for the given key.
* If the key does not exist it will return the given default value.
*/
public String get(String key, String defaultValue) {
addToDefaults(key, defaultValue);
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return value;
}
}
/**
* Returns the Collection of String values for the given key.
* If the key does not exist it will return null.
*/
public Collection<String> getMultiParameter(String key) {
addToDefaults(key, null);
unrequestedParameters.remove(key);
return data.getOrDefault(key, null);
}
/**
* Returns the Collection of String values for the given key.
* If the key does not exist it will throw a {@link RuntimeException}.
*/
public Collection<String> getMultiParameterRequired(String key) {
addToDefaults(key, null);
Collection<String> value = getMultiParameter(key);
if (value == null) {
throw new RuntimeException("No data for required key '" + key + "'");
}
return value;
}
/**
* Check if value is set.
*/
public boolean has(String value) {
addToDefaults(value, null);
unrequestedParameters.remove(value);
return data.containsKey(value);
}
/**
* Returns the Integer value for the given key.
* The method fails if the key does not exist or the value is not an Integer.
*/
public int getInt(String key) {
addToDefaults(key, null);
String value = getRequired(key);
return Integer.parseInt(value);
}
/**
     * Returns the Integer value for the given key. If the key does not exist, it will return the given default value.
* The method fails if the value is not an Integer.
*/
public int getInt(String key, int defaultValue) {
addToDefaults(key, Integer.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
}
return Integer.parseInt(value);
}
/**
* Returns the Long value for the given key.
* The method fails if the key does not exist.
*/
public long getLong(String key) {
addToDefaults(key, null);
String value = getRequired(key);
return Long.parseLong(value);
}
/**
     * Returns the Long value for the given key. If the key does not exist, it will return the given default value.
* The method fails if the value is not a Long.
*/
public long getLong(String key, long defaultValue) {
addToDefaults(key, Long.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
}
return Long.parseLong(value);
}
/**
* Returns the Float value for the given key.
* The method fails if the key does not exist.
*/
public float getFloat(String key) {
addToDefaults(key, null);
String value = getRequired(key);
return Float.valueOf(value);
}
/**
     * Returns the Float value for the given key. If the key does not exist, it will return the given default value.
* The method fails if the value is not a Float.
*/
public float getFloat(String key, float defaultValue) {
addToDefaults(key, Float.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Float.valueOf(value);
}
}
/**
* Returns the Double value for the given key.
* The method fails if the key does not exist.
*/
public double getDouble(String key) {
addToDefaults(key, null);
String value = getRequired(key);
return Double.valueOf(value);
}
/**
     * Returns the Double value for the given key. If the key does not exist, it will return the given default value.
* The method fails if the value is not a Double.
*/
public double getDouble(String key, double defaultValue) {
addToDefaults(key, Double.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Double.valueOf(value);
}
}
/**
* Returns the Boolean value for the given key.
* The method fails if the key does not exist.
*/
public boolean getBoolean(String key) {
addToDefaults(key, null);
String value = getRequired(key);
return Boolean.valueOf(value);
}
/**
     * Returns the Boolean value for the given key. If the key does not exist, it will return the given default value.
     * The method returns whether the string value equals "true", ignoring case.
*/
public boolean getBoolean(String key, boolean defaultValue) {
addToDefaults(key, Boolean.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Boolean.valueOf(value);
}
}
/**
* Returns the Short value for the given key.
* The method fails if the key does not exist.
*/
public short getShort(String key) {
addToDefaults(key, null);
String value = getRequired(key);
return Short.valueOf(value);
}
/**
     * Returns the Short value for the given key. If the key does not exist, it will return the given default value.
* The method fails if the value is not a Short.
*/
public short getShort(String key, short defaultValue) {
addToDefaults(key, Short.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Short.valueOf(value);
}
}
/**
* Returns the Byte value for the given key.
* The method fails if the key does not exist.
*/
public byte getByte(String key) {
addToDefaults(key, null);
String value = getRequired(key);
return Byte.valueOf(value);
}
/**
     * Returns the Byte value for the given key. If the key does not exist, it will return the given default value.
* The method fails if the value is not a Byte.
*/
public byte getByte(String key, byte defaultValue) {
addToDefaults(key, Byte.toString(defaultValue));
String value = get(key);
if (value == null) {
return defaultValue;
} else {
return Byte.valueOf(value);
}
}
protected void addToDefaults(String key, String value) {
final String currentValue = defaultData.get(key);
if (currentValue == null) {
if (value == null) {
value = DEFAULT_UNDEFINED;
}
defaultData.put(key, value);
} else {
if (currentValue.equals(DEFAULT_UNDEFINED) && value != null) {
defaultData.put(key, value);
}
}
}
/**
* Returns a {@link Configuration} object from this {@link ParameterTool}.
*
* @return A {@link Configuration}
*/
public Configuration getConfiguration() {
final Configuration conf = new Configuration();
for (Map.Entry<String, String> entry : toMap().entrySet()) {
conf.setString(entry.getKey(), entry.getValue());
}
return conf;
}
/**
* Returns a {@link Properties} object from this {@link ParameterTool}.
*
* @return A {@link Properties}
*/
public Properties getProperties() {
Properties props = new Properties();
props.putAll(toMap());
return props;
}
/**
* Create a properties file with all the known parameters (call after the last get*() call).
* Set the default value, if available.
*
* <p>Use this method to create a properties file skeleton.
*
* @param pathToFile Location of the default properties file.
*/
public void createPropertiesFile(String pathToFile) throws IOException {
createPropertiesFile(pathToFile, true);
}
/**
* Create a properties file with all the known parameters (call after the last get*() call).
* Set the default value, if overwrite is true.
*
* @param pathToFile Location of the default properties file.
* @param overwrite Boolean flag indicating whether or not to overwrite the file
* @throws IOException If overwrite is not allowed and the file exists
*/
public void createPropertiesFile(String pathToFile, boolean overwrite) throws IOException {
final File file = new File(pathToFile);
if (file.exists()) {
if (overwrite) {
file.delete();
} else {
throw new RuntimeException("File " + pathToFile + " exists and overwriting is not allowed");
}
}
final Properties defaultProps = new Properties();
defaultProps.putAll(this.defaultData);
try (final OutputStream out = new FileOutputStream(file)) {
defaultProps.store(out, "Default file created by Flink's ParameterUtil.createPropertiesFile()");
}
}
@Override
protected Object clone() throws CloneNotSupportedException {
return new ParameterTool(this.data);
}
/**
* Merges two {@link ParameterTool}.
*
* @param other Other {@link ParameterTool} object
* @return The Merged {@link ParameterTool}
*/
public ParameterTool mergeWith(ParameterTool other) {
final Map<String, Collection<String>> resultData = new HashMap<>(data.size() + other.data.size());
resultData.putAll(data);
resultData.putAll(other.data);
final ParameterTool ret = new ParameterTool(resultData);
final HashSet<String> requestedParametersLeft = new HashSet<>(data.keySet());
requestedParametersLeft.removeAll(unrequestedParameters);
final HashSet<String> requestedParametersRight = new HashSet<>(other.data.keySet());
requestedParametersRight.removeAll(other.unrequestedParameters);
ret.unrequestedParameters.removeAll(requestedParametersLeft);
ret.unrequestedParameters.removeAll(requestedParametersRight);
return ret;
}
@Override
/*
     * The value collection has at least one value, so there is no need to check for null here.
     * If there are multiple values, only the first one is used.
*/
public Map<String, String> toMap() {
return data.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> (String) e.getValue().toArray()[0]
));
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
defaultData = new ConcurrentHashMap<>(data.size());
unrequestedParameters = Collections.newSetFromMap(new ConcurrentHashMap<>(data.size()));
}
}
|
class ParameterTool extends AbstractParameterTool {
private static final long serialVersionUID = 1L;
/**
* Returns {@link ParameterTool} for the given arguments. The arguments are keys followed by values.
* Keys have to start with '-' or '--'
*
* <p><strong>Example arguments:</strong>
* --key1 value1 --key2 value2 -key3 value3
*
* @param args Input array arguments
* @return A {@link ParameterTool}
*/
/**
* Returns {@link ParameterTool} for the given {@link Properties} file.
*
* @param path Path to the properties file
* @return A {@link ParameterTool}
* @throws IOException If the file does not exist
* @see Properties
*/
public static ParameterTool fromPropertiesFile(String path) throws IOException {
File propertiesFile = new File(path);
return fromPropertiesFile(propertiesFile);
}
/**
* Returns {@link ParameterTool} for the given {@link Properties} file.
*
* @param file File object to the properties file
* @return A {@link ParameterTool}
* @throws IOException If the file does not exist
* @see Properties
*/
public static ParameterTool fromPropertiesFile(File file) throws IOException {
if (!file.exists()) {
throw new FileNotFoundException("Properties file " + file.getAbsolutePath() + " does not exist");
}
try (FileInputStream fis = new FileInputStream(file)) {
return fromPropertiesFile(fis);
}
}
/**
* Returns {@link ParameterTool} for the given InputStream from {@link Properties} file.
*
* @param inputStream InputStream from the properties file
* @return A {@link ParameterTool}
* @throws IOException If the file does not exist
* @see Properties
*/
public static ParameterTool fromPropertiesFile(InputStream inputStream) throws IOException {
Properties props = new Properties();
props.load(inputStream);
return fromMap((Map) props);
}
/**
* Returns {@link ParameterTool} for the given map.
*
* @param map A map of arguments. Both Key and Value have to be Strings
* @return A {@link ParameterTool}
*/
public static ParameterTool fromMap(Map<String, String> map) {
Preconditions.checkNotNull(map, "Unable to initialize from empty map");
return new ParameterTool(map);
}
/**
* Returns {@link ParameterTool} from the system properties.
     * Example of how to pass system properties:
* -Dkey1=value1 -Dkey2=value2
*
* @return A {@link ParameterTool}
*/
public static ParameterTool fromSystemProperties() {
return fromMap((Map) System.getProperties());
}
protected final Map<String, String> data;
private ParameterTool(Map<String, String> data) {
this.data = Collections.unmodifiableMap(new HashMap<>(data));
this.defaultData = new ConcurrentHashMap<>(data.size());
this.unrequestedParameters = Collections.newSetFromMap(new ConcurrentHashMap<>(data.size()));
unrequestedParameters.addAll(data.keySet());
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ParameterTool that = (ParameterTool) o;
return Objects.equals(data, that.data) &&
Objects.equals(defaultData, that.defaultData) &&
Objects.equals(unrequestedParameters, that.unrequestedParameters);
}
@Override
public int hashCode() {
return Objects.hash(data, defaultData, unrequestedParameters);
}
/**
* Returns number of parameters in {@link ParameterTool}.
*/
@Override
public int getNumberOfParameters() {
return data.size();
}
/**
* Returns the String value for the given key.
* If the key does not exist it will return null.
*/
@Override
public String get(String key) {
addToDefaults(key, null);
unrequestedParameters.remove(key);
return data.get(key);
}
/**
* Check if value is set.
*/
@Override
public boolean has(String value) {
addToDefaults(value, null);
unrequestedParameters.remove(value);
return data.containsKey(value);
}
/**
* Returns a {@link Configuration} object from this {@link ParameterTool}.
*
* @return A {@link Configuration}
*/
public Configuration getConfiguration() {
final Configuration conf = new Configuration();
for (Map.Entry<String, String> entry : data.entrySet()) {
conf.setString(entry.getKey(), entry.getValue());
}
return conf;
}
/**
* Returns a {@link Properties} object from this {@link ParameterTool}.
*
* @return A {@link Properties}
*/
public Properties getProperties() {
Properties props = new Properties();
props.putAll(this.data);
return props;
}
/**
* Create a properties file with all the known parameters (call after the last get*() call).
* Set the default value, if available.
*
* <p>Use this method to create a properties file skeleton.
*
* @param pathToFile Location of the default properties file.
*/
public void createPropertiesFile(String pathToFile) throws IOException {
createPropertiesFile(pathToFile, true);
}
/**
* Create a properties file with all the known parameters (call after the last get*() call).
* Set the default value, if overwrite is true.
*
* @param pathToFile Location of the default properties file.
* @param overwrite Boolean flag indicating whether or not to overwrite the file
* @throws IOException If overwrite is not allowed and the file exists
*/
public void createPropertiesFile(String pathToFile, boolean overwrite) throws IOException {
final File file = new File(pathToFile);
if (file.exists()) {
if (overwrite) {
file.delete();
} else {
throw new RuntimeException("File " + pathToFile + " exists and overwriting is not allowed");
}
}
final Properties defaultProps = new Properties();
defaultProps.putAll(this.defaultData);
try (final OutputStream out = new FileOutputStream(file)) {
defaultProps.store(out, "Default file created by Flink's ParameterUtil.createPropertiesFile()");
}
}
@Override
protected Object clone() throws CloneNotSupportedException {
return new ParameterTool(this.data);
}
/**
* Merges two {@link ParameterTool}.
*
* @param other Other {@link ParameterTool} object
* @return The Merged {@link ParameterTool}
*/
public ParameterTool mergeWith(ParameterTool other) {
final Map<String, String> resultData = new HashMap<>(data.size() + other.data.size());
resultData.putAll(data);
resultData.putAll(other.data);
final ParameterTool ret = new ParameterTool(resultData);
final HashSet<String> requestedParametersLeft = new HashSet<>(data.keySet());
requestedParametersLeft.removeAll(unrequestedParameters);
final HashSet<String> requestedParametersRight = new HashSet<>(other.data.keySet());
requestedParametersRight.removeAll(other.unrequestedParameters);
ret.unrequestedParameters.removeAll(requestedParametersLeft);
ret.unrequestedParameters.removeAll(requestedParametersRight);
return ret;
}
@Override
public Map<String, String> toMap() {
return data;
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
defaultData = new ConcurrentHashMap<>(data.size());
unrequestedParameters = Collections.newSetFromMap(new ConcurrentHashMap<>(data.size()));
}
}
|
I think this TODO could be removed, since the proxy is inside the HTTP client now.
|
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
final HttpRequest request = context.getHttpRequest();
return next.process().flatMap(
response -> {
if (!isResponseSuccessful(response)) {
HttpResponse bufferedResponse = response.buffer();
return FluxUtil.collectBytesInByteBufferStream(bufferedResponse.getBody()).flatMap(
body -> {
String bodyStr = new String(body, StandardCharsets.UTF_8);
AzureJacksonAdapter jacksonAdapter = new AzureJacksonAdapter();
CloudError cloudError;
try {
cloudError = jacksonAdapter.deserialize(
bodyStr, CloudError.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.just(bufferedResponse);
}
if (cloudError != null && MISSING_SUBSCRIPTION_REGISTRATION.equals(cloudError.getCode())) {
String subscriptionId = ResourceUtils.extractFromResourceId(
request.getUrl().getPath(), "subscriptions");
ResourceManager resourceManager = ResourceManager.authenticate(credential)
.withSubscription(subscriptionId);
Pattern providerPattern = Pattern.compile(".*'(.*)'");
Matcher providerMatcher = providerPattern.matcher(cloudError.getMessage());
providerMatcher.find();
return registerProviderUntilSuccess(providerMatcher.group(1), resourceManager)
.flatMap(afterRegistered -> next.process());
}
return Mono.just(bufferedResponse);
}
);
}
return Mono.just(response);
}
);
}
|
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
return next.process().flatMap(
response -> {
if (!isResponseSuccessful(response)) {
HttpResponse bufferedResponse = response.buffer();
return FluxUtil.collectBytesInByteBufferStream(bufferedResponse.getBody()).flatMap(
body -> {
String bodyStr = new String(body, StandardCharsets.UTF_8);
AzureJacksonAdapter jacksonAdapter = new AzureJacksonAdapter();
CloudError cloudError;
try {
cloudError = jacksonAdapter.deserialize(
bodyStr, CloudError.class, SerializerEncoding.JSON);
} catch (IOException e) {
return Mono.just(bufferedResponse);
}
if (cloudError != null && MISSING_SUBSCRIPTION_REGISTRATION.equals(cloudError.getCode())) {
ResourceManager resourceManager = ResourceManager.authenticate(credential, profile)
.withDefaultSubscription();
Pattern providerPattern = Pattern.compile(".*'(.*)'");
Matcher providerMatcher = providerPattern.matcher(cloudError.getMessage());
providerMatcher.find();
return registerProviderUntilSuccess(providerMatcher.group(1), resourceManager)
.flatMap(afterRegistered -> next.process());
}
return Mono.just(bufferedResponse);
}
);
}
return Mono.just(response);
}
);
}
|
class ProviderRegistrationPolicy implements HttpPipelinePolicy {
private static final String MISSING_SUBSCRIPTION_REGISTRATION = "MissingSubscriptionRegistration";
private final TokenCredential credential;
/**
* Initialize a provider registration policy with a credential that's authorized
* to register the provider.
* @param credential the credential for provider registration
*/
public ProviderRegistrationPolicy(TokenCredential credential) {
this.credential = credential;
}
private boolean isResponseSuccessful(HttpResponse response) {
return response.getStatusCode() >= 200 && response.getStatusCode() < 300;
}
@Override
private Mono<Void> registerProviderUntilSuccess(String namespace, ResourceManager resourceManager) {
return resourceManager.providers().registerAsync(namespace)
.flatMap(
provider -> {
if (isProviderRegistered(provider)) {
return Mono.empty();
}
return resourceManager.providers().getByNameAsync(namespace)
.flatMap(providerGet -> checkProviderRegistered(providerGet))
.retry(60, ProviderUnregisteredException.class::isInstance);
}
);
}
private Mono<Void> checkProviderRegistered(Provider provider) throws ProviderUnregisteredException {
if (isProviderRegistered(provider)) {
return Mono.empty();
}
SdkContext.sleep(5 * 1000);
return Mono.error(new ProviderUnregisteredException());
}
private boolean isProviderRegistered(Provider provider) {
return provider.registrationState().equalsIgnoreCase("Registered");
}
private class ProviderUnregisteredException extends RuntimeException {
}
}
|
class ProviderRegistrationPolicy implements HttpPipelinePolicy {
private static final String MISSING_SUBSCRIPTION_REGISTRATION = "MissingSubscriptionRegistration";
private final TokenCredential credential;
private final AzureProfile profile;
/**
* Initialize a provider registration policy with a credential that's authorized
* to register the provider.
* @param credential the credential for provider registration
* @param profile the profile to use
*/
public ProviderRegistrationPolicy(TokenCredential credential, AzureProfile profile) {
this.credential = credential;
this.profile = profile;
}
private boolean isResponseSuccessful(HttpResponse response) {
return response.getStatusCode() >= 200 && response.getStatusCode() < 300;
}
@Override
private Mono<Void> registerProviderUntilSuccess(String namespace, ResourceManager resourceManager) {
return resourceManager.providers().registerAsync(namespace)
.flatMap(
provider -> {
if (isProviderRegistered(provider)) {
return Mono.empty();
}
return resourceManager.providers().getByNameAsync(namespace)
.flatMap(providerGet -> checkProviderRegistered(providerGet))
.retry(60, ProviderUnregisteredException.class::isInstance);
}
);
}
private Mono<Void> checkProviderRegistered(Provider provider) throws ProviderUnregisteredException {
if (isProviderRegistered(provider)) {
return Mono.empty();
}
SdkContext.sleep(5 * 1000);
return Mono.error(new ProviderUnregisteredException());
}
private boolean isProviderRegistered(Provider provider) {
return provider.registrationState().equalsIgnoreCase("Registered");
}
private class ProviderUnregisteredException extends RuntimeException {
}
}
|
|
```suggestion
ComputeNode node = GlobalStateMgr.getCurrentSystemInfo().getBackendOrComputeNodeWithBePort(host, port);
```
|
public Status releaseMetadataLocks() {
for (TScanRangeLocations tablet : tabletLocations) {
TScanRange scanRange = tablet.getScan_range();
if (!scanRange.isSetInternal_scan_range()) {
continue;
}
TInternalScanRange internalScanRange = scanRange.getInternal_scan_range();
List<TScanRangeLocation> locations = tablet.getLocations();
for (TScanRangeLocation location : locations) {
TNetworkAddress address = location.getServer();
String host = address.getHostname();
int port = address.getPort();
ComputeNode node = GlobalStateMgr.getCurrentSystemInfo().getBackendWithBePort(host, port);
if (RunMode.getCurrentRunMode() == RunMode.SHARED_DATA) {
node = GlobalStateMgr.getCurrentSystemInfo().getBackendOrComputeNodeWithBePort(host, port);
}
if (node == null) {
continue;
}
try {
LakeService lakeService = BrpcProxy.getLakeService(host, port);
UnlockTabletMetadataRequest request = new UnlockTabletMetadataRequest();
request.tabletId = internalScanRange.getTablet_id();
request.version = Long.parseLong(internalScanRange.getVersion());
request.expireTime = (getCreateTimeMs() / 1000) + getTimeoutSecond();
lakeService.unlockTabletMetadata(request);
} catch (Throwable e) {
LOG.error("Fail to release metadata lock, job id {}, tablet id {}, version {}", id,
tableId, internalScanRange.getVersion());
}
}
}
return Status.OK;
}
|
}
|
public Status releaseMetadataLocks() {
for (TScanRangeLocations tablet : tabletLocations) {
TScanRange scanRange = tablet.getScan_range();
if (!scanRange.isSetInternal_scan_range()) {
continue;
}
TInternalScanRange internalScanRange = scanRange.getInternal_scan_range();
List<TScanRangeLocation> locations = tablet.getLocations();
for (TScanRangeLocation location : locations) {
TNetworkAddress address = location.getServer();
String host = address.getHostname();
int port = address.getPort();
ComputeNode node = GlobalStateMgr.getCurrentSystemInfo().getBackendOrComputeNodeWithBePort(host, port);
if (!GlobalStateMgr.getCurrentSystemInfo().checkNodeAvailable(node)) {
continue;
}
try {
LakeService lakeService = BrpcProxy.getLakeService(host, port);
UnlockTabletMetadataRequest request = new UnlockTabletMetadataRequest();
request.tabletId = internalScanRange.getTablet_id();
request.version = Long.parseLong(internalScanRange.getVersion());
request.expireTime = (getCreateTimeMs() / 1000) + getTimeoutSecond();
lakeService.unlockTabletMetadata(request);
} catch (Throwable e) {
LOG.error("Fail to release metadata lock, job id {}, tablet id {}, version {}", id,
tableId, internalScanRange.getVersion());
}
}
}
return Status.OK;
}
|
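The releaseMetadataLocks change above swaps the node lookup to getBackendOrComputeNodeWithBePort plus a checkNodeAvailable guard, but the cleanup loop itself stays deliberately best-effort: each unlock RPC sits in its own try/catch so one unreachable node cannot abort the rest of the sweep, and the method returns Status.OK regardless. A minimal sketch of that per-item pattern follows; the Unlocker interface and item names are hypothetical, not from the StarRocks code.
```java
import java.util.List;

// Hypothetical sketch of a best-effort cleanup sweep: every item gets its own
// try/catch so a single failure never stops the loop.
public final class BestEffortCleanup {
    interface Unlocker { void unlock(String item) throws Exception; }

    static int releaseAll(List<String> items, Unlocker unlocker) {
        int failures = 0;
        for (String item : items) {
            try {
                unlocker.unlock(item);      // e.g. lakeService.unlockTabletMetadata(...)
            } catch (Throwable t) {
                failures++;                 // log and keep going, as the job code does
                System.err.println("failed to unlock " + item + ": " + t);
            }
        }
        return failures;                    // caller still reports overall success
    }

    public static void main(String[] args) {
        int failed = releaseAll(List.of("t1", "t2", "t3"), item -> {
            if (item.equals("t2")) {
                throw new RuntimeException("node unreachable");
            }
        });
        System.out.println("failures=" + failed);
    }
}
```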
class ExportJob implements Writable, GsonPostProcessable {
private static final Logger LOG = LogManager.getLogger(ExportJob.class);
private final DescriptorTable desc;
private final Set<String> exportedTempFiles = Sets.newConcurrentHashSet();
private Set<String> exportedFiles = Sets.newConcurrentHashSet();
private final Analyzer analyzer;
private final List<Coordinator> coordList = Lists.newArrayList();
private final AtomicInteger nextId = new AtomicInteger(0);
private List<Pair<TNetworkAddress, String>> snapshotPaths = Lists.newArrayList();
private final Map<Long, Long> beLastStartTime = Maps.newHashMap();
@SerializedName("id")
private long id;
private UUID queryId;
@SerializedName("qd")
private String queryIdString;
@SerializedName("dd")
private long dbId;
@SerializedName("td")
private long tableId;
@SerializedName("bd")
private BrokerDesc brokerDesc;
@SerializedName("ep")
private String exportPath;
private String exportTempPath;
private String fileNamePrefix;
@SerializedName("cs")
private String columnSeparator;
@SerializedName("rd")
private String rowDelimiter;
private boolean includeQueryId;
@SerializedName("pt")
private Map<String, String> properties = Maps.newHashMap();
@SerializedName("ps")
private List<String> partitions;
@SerializedName("tn")
private TableName tableName;
private List<String> columnNames;
private String sql = "";
@SerializedName("se")
private JobState state;
@SerializedName("ct")
private long createTimeMs;
@SerializedName("st")
private long startTimeMs;
@SerializedName("ft")
private long finishTimeMs;
@SerializedName("pg")
private int progress;
@SerializedName("fm")
private ExportFailMsg failMsg;
private TupleDescriptor exportTupleDesc;
private Table exportTable;
private boolean isReplayed = false;
private Thread doExportingThread;
private List<TScanRangeLocations> tabletLocations = Lists.newArrayList();
public ExportJob() {
this.id = -1;
this.queryId = null;
this.dbId = -1;
this.tableId = -1;
this.state = JobState.PENDING;
this.progress = 0;
this.createTimeMs = System.currentTimeMillis();
this.startTimeMs = -1;
this.finishTimeMs = -1;
this.failMsg = new ExportFailMsg(ExportFailMsg.CancelType.UNKNOWN, "");
this.analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), null);
this.desc = analyzer.getDescTbl();
this.exportPath = "";
this.exportTempPath = "";
this.fileNamePrefix = "";
this.columnSeparator = "\t";
this.rowDelimiter = "\n";
this.includeQueryId = true;
}
public ExportJob(long jobId, UUID queryId) {
this();
this.id = jobId;
this.queryId = queryId;
this.queryIdString = queryId.toString();
}
public void setJob(ExportStmt stmt) throws UserException {
String dbName = stmt.getTblName().getDb();
Database db = GlobalStateMgr.getCurrentState().getDb(dbName);
if (db == null) {
throw new DdlException("Database " + dbName + " does not exist");
}
this.brokerDesc = stmt.getBrokerDesc();
Preconditions.checkNotNull(brokerDesc);
this.columnSeparator = stmt.getColumnSeparator();
this.rowDelimiter = stmt.getRowDelimiter();
this.includeQueryId = stmt.isIncludeQueryId();
this.properties = stmt.getProperties();
exportPath = stmt.getPath();
Preconditions.checkArgument(!Strings.isNullOrEmpty(exportPath));
exportTempPath = this.exportPath + "__starrocks_export_tmp_" + queryId.toString();
fileNamePrefix = stmt.getFileNamePrefix();
Preconditions.checkArgument(!Strings.isNullOrEmpty(fileNamePrefix));
if (includeQueryId) {
fileNamePrefix += queryId.toString() + "_";
}
this.partitions = stmt.getPartitions();
this.columnNames = stmt.getColumnNames();
db.readLock();
try {
this.dbId = db.getId();
this.exportTable = db.getTable(stmt.getTblName().getTbl());
if (exportTable == null) {
throw new DdlException("Table " + stmt.getTblName().getTbl() + " does not exist");
}
this.tableId = exportTable.getId();
this.tableName = stmt.getTblName();
genExecFragment(stmt);
} finally {
db.readUnlock();
}
this.sql = stmt.toSql();
}
private void genExecFragment(ExportStmt stmt) throws UserException {
registerToDesc();
plan(stmt);
}
private void registerToDesc() throws UserException {
TableRef ref = new TableRef(tableName, null, partitions == null ? null : new PartitionNames(false, partitions));
BaseTableRef tableRef = new BaseTableRef(ref, exportTable, tableName);
exportTupleDesc = desc.createTupleDescriptor();
exportTupleDesc.setTable(exportTable);
exportTupleDesc.setRef(tableRef);
Map<String, Column> nameToColumn = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
List<Column> tableColumns = exportTable.getBaseSchema();
List<Column> exportColumns = Lists.newArrayList();
for (Column column : tableColumns) {
nameToColumn.put(column.getName(), column);
}
if (columnNames == null) {
exportColumns.addAll(tableColumns);
} else {
for (String columnName : columnNames) {
if (!nameToColumn.containsKey(columnName)) {
throw new UserException("Column [" + columnName + "] does not exist in table.");
}
exportColumns.add(nameToColumn.get(columnName));
}
}
for (Column col : exportColumns) {
SlotDescriptor slot = desc.addSlotDescriptor(exportTupleDesc);
slot.setIsMaterialized(true);
slot.setColumn(col);
slot.setIsNullable(col.isAllowNull());
}
desc.computeMemLayout();
}
private void plan(ExportStmt stmt) throws UserException {
List<PlanFragment> fragments = Lists.newArrayList();
List<ScanNode> scanNodes = Lists.newArrayList();
ScanNode scanNode = genScanNode();
tabletLocations = scanNode.getScanRangeLocations(0);
if (tabletLocations == null) {
PlanFragment fragment = genPlanFragment(exportTable.getType(), scanNode, 0);
scanNodes.add(scanNode);
fragments.add(fragment);
} else {
for (TScanRangeLocations tablet : tabletLocations) {
List<TScanRangeLocation> locations = tablet.getLocations();
Collections.shuffle(locations);
tablet.setLocations(locations.subList(0, 1));
}
long maxBytesPerBe = Config.export_max_bytes_per_be_per_task;
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
List<TScanRangeLocations> copyTabletLocations = Lists.newArrayList(tabletLocations);
int taskIdx = 0;
while (!copyTabletLocations.isEmpty()) {
Map<Long, Long> bytesPerBe = Maps.newHashMap();
List<TScanRangeLocations> taskTabletLocations = Lists.newArrayList();
Iterator<TScanRangeLocations> iter = copyTabletLocations.iterator();
while (iter.hasNext()) {
TScanRangeLocations scanRangeLocations = iter.next();
long tabletId = scanRangeLocations.getScan_range().getInternal_scan_range().getTablet_id();
long backendId = scanRangeLocations.getLocations().get(0).getBackend_id();
Replica replica = invertedIndex.getReplica(tabletId, backendId);
long dataSize = replica != null ? replica.getDataSize() : 0L;
Long assignedBytes = bytesPerBe.get(backendId);
if (assignedBytes == null || assignedBytes < maxBytesPerBe) {
taskTabletLocations.add(scanRangeLocations);
bytesPerBe.put(backendId, assignedBytes != null ? assignedBytes + dataSize : dataSize);
iter.remove();
}
}
OlapScanNode taskScanNode = genOlapScanNodeByLocation(taskTabletLocations);
scanNodes.add(taskScanNode);
PlanFragment fragment = genPlanFragment(exportTable.getType(), taskScanNode, taskIdx++);
fragments.add(fragment);
}
LOG.info("total {} tablets of export job {}, and assign them to {} coordinators",
tabletLocations.size(), id, fragments.size());
}
genCoordinators(stmt, fragments, scanNodes);
}
private ScanNode genScanNode() throws UserException {
ScanNode scanNode = null;
switch (exportTable.getType()) {
case OLAP:
case CLOUD_NATIVE:
scanNode = new OlapScanNode(new PlanNodeId(0), exportTupleDesc, "OlapScanNodeForExport");
scanNode.setColumnFilters(Maps.newHashMap());
((OlapScanNode) scanNode).setIsPreAggregation(false, "This an export operation");
((OlapScanNode) scanNode).setCanTurnOnPreAggr(false);
scanNode.init(analyzer);
((OlapScanNode) scanNode).selectBestRollupByRollupSelector();
break;
case MYSQL:
scanNode = new MysqlScanNode(new PlanNodeId(0), exportTupleDesc, (MysqlTable) this.exportTable);
break;
default:
throw new UserException("Unsupported table type: " + exportTable.getType());
}
scanNode.finalizeStats(analyzer);
return scanNode;
}
private OlapScanNode genOlapScanNodeByLocation(List<TScanRangeLocations> locations) {
return OlapScanNode.createOlapScanNodeByLocation(
new PlanNodeId(nextId.getAndIncrement()),
exportTupleDesc,
"OlapScanNodeForExport",
locations);
}
private PlanFragment genPlanFragment(Table.TableType type, ScanNode scanNode, int taskIdx) throws UserException {
PlanFragment fragment = null;
switch (exportTable.getType()) {
case OLAP:
case CLOUD_NATIVE:
fragment = new PlanFragment(
new PlanFragmentId(nextId.getAndIncrement()), scanNode, DataPartition.RANDOM);
break;
case MYSQL:
fragment = new PlanFragment(
new PlanFragmentId(nextId.getAndIncrement()), scanNode, DataPartition.UNPARTITIONED);
break;
default:
break;
}
if (fragment == null) {
throw new UserException("invalid table type:" + exportTable.getType());
}
fragment.setOutputExprs(createOutputExprs());
scanNode.setFragmentId(fragment.getFragmentId());
THdfsProperties hdfsProperties = new THdfsProperties();
if (!brokerDesc.hasBroker()) {
HdfsUtil.getTProperties(exportTempPath, brokerDesc, hdfsProperties);
}
fragment.setSink(new ExportSink(exportTempPath, fileNamePrefix + taskIdx + "_", columnSeparator,
rowDelimiter, brokerDesc, hdfsProperties));
try {
fragment.createDataSink(TResultSinkType.MYSQL_PROTOCAL);
} catch (Exception e) {
LOG.info("Fragment finalize failed. e=", e);
throw new UserException("Fragment finalize failed");
}
return fragment;
}
private List<Expr> createOutputExprs() {
List<Expr> outputExprs = Lists.newArrayList();
for (int i = 0; i < exportTupleDesc.getSlots().size(); ++i) {
SlotDescriptor slotDesc = exportTupleDesc.getSlots().get(i);
SlotRef slotRef = new SlotRef(slotDesc);
if (slotDesc.getType().getPrimitiveType() == PrimitiveType.CHAR) {
slotRef.setType(Type.CHAR);
}
outputExprs.add(slotRef);
}
return outputExprs;
}
private Coordinator.Factory getCoordinatorFactory() {
return new DefaultCoordinator.Factory();
}
private void genCoordinators(ExportStmt stmt, List<PlanFragment> fragments, List<ScanNode> nodes) {
UUID uuid = UUID.randomUUID();
for (int i = 0; i < fragments.size(); ++i) {
PlanFragment fragment = fragments.get(i);
ScanNode scanNode = nodes.get(i);
TUniqueId queryId = new TUniqueId(uuid.getMostSignificantBits() + i, uuid.getLeastSignificantBits());
Coordinator coord = getCoordinatorFactory().createBrokerExportScheduler(
id, queryId, desc, Lists.newArrayList(fragment), Lists.newArrayList(scanNode),
TimeUtils.DEFAULT_TIME_ZONE, stmt.getExportStartTime(), Maps.newHashMap(), getMemLimit());
this.coordList.add(coord);
LOG.info("split export job to tasks. job id: {}, job query id: {}, task idx: {}, task query id: {}",
id, DebugUtil.printId(this.queryId), i, DebugUtil.printId(queryId));
}
LOG.info("create {} coordintors for export job: {}", coordList.size(), id);
}
public Coordinator resetCoord(int taskIndex, TUniqueId newQueryId) throws UserException {
Coordinator coord = coordList.get(taskIndex);
OlapScanNode olapScanNode = (OlapScanNode) coord.getScanNodes().get(0);
List<TScanRangeLocations> locations = olapScanNode.getScanRangeLocations(0);
if (locations.size() == 0) {
throw new UserException("SubExportTask " + taskIndex + " scan range is empty");
}
OlapScanNode newOlapScanNode = new OlapScanNode(new PlanNodeId(0), exportTupleDesc, "OlapScanNodeForExport");
Analyzer tmpAnalyzer = new Analyzer(GlobalStateMgr.getCurrentState(), null);
newOlapScanNode.setColumnFilters(Maps.newHashMap());
newOlapScanNode.setIsPreAggregation(false, "This is an export operation");
newOlapScanNode.setCanTurnOnPreAggr(false);
newOlapScanNode.init(tmpAnalyzer);
newOlapScanNode.selectBestRollupByRollupSelector();
List<TScanRangeLocations> newLocations = newOlapScanNode.updateScanRangeLocations(locations);
for (TScanRangeLocations tablet : newLocations) {
List<TScanRangeLocation> tabletLocations = tablet.getLocations();
Collections.shuffle(tabletLocations);
tablet.setLocations(tabletLocations.subList(0, 1));
}
OlapScanNode newTaskScanNode = genOlapScanNodeByLocation(newLocations);
PlanFragment newFragment = genPlanFragment(exportTable.getType(), newTaskScanNode, taskIndex);
Coordinator newCoord = getCoordinatorFactory().createBrokerExportScheduler(
id, newQueryId, desc, Lists.newArrayList(newFragment), Lists.newArrayList(newTaskScanNode),
TimeUtils.DEFAULT_TIME_ZONE, coord.getStartTimeMs(), Maps.newHashMap(), getMemLimit());
this.coordList.set(taskIndex, newCoord);
LOG.info("reset coordinator for export job: {}, taskIdx: {}", id, taskIndex);
return newCoord;
}
public boolean needResetCoord() {
return exportTable.isOlapTable();
}
public void setSnapshotPaths(List<Pair<TNetworkAddress, String>> snapshotPaths) {
this.snapshotPaths = snapshotPaths;
}
public void setExportTempPath(String exportTempPath) {
this.exportTempPath = exportTempPath;
}
public void setExportedFiles(Set<String> exportedFiles) {
this.exportedFiles = exportedFiles;
}
public void setBeStartTime(long beId, long lastStartTime) {
this.beLastStartTime.put(beId, lastStartTime);
}
public void setFailMsg(ExportFailMsg failMsg) {
this.failMsg = failMsg;
}
public Map<Long, Long> getBeStartTimeMap() {
return this.beLastStartTime;
}
public long getId() {
return id;
}
public UUID getQueryId() {
return queryId;
}
public long getDbId() {
return dbId;
}
public long getTableId() {
return this.tableId;
}
public JobState getState() {
return state;
}
public BrokerDesc getBrokerDesc() {
return brokerDesc;
}
public void setBrokerDesc(BrokerDesc brokerDesc) {
this.brokerDesc = brokerDesc;
}
public String getExportPath() {
return exportPath;
}
public String getColumnSeparator() {
return this.columnSeparator;
}
public String getRowDelimiter() {
return this.rowDelimiter;
}
public long getMemLimit() {
if (properties.containsKey(LoadStmt.LOAD_MEM_LIMIT)) {
return Long.parseLong(properties.get(LoadStmt.LOAD_MEM_LIMIT));
} else {
return 0;
}
}
public int getTimeoutSecond() {
if (properties.containsKey(LoadStmt.TIMEOUT_PROPERTY)) {
return Integer.parseInt(properties.get(LoadStmt.TIMEOUT_PROPERTY));
} else {
return Config.export_task_default_timeout_second;
}
}
public List<String> getPartitions() {
return partitions;
}
public List<String> getColumnNames() {
return columnNames;
}
public synchronized int getProgress() {
return progress;
}
public synchronized void setProgress(int progress) {
this.progress = progress;
}
public long getCreateTimeMs() {
return createTimeMs;
}
public long getStartTimeMs() {
return startTimeMs;
}
public long getFinishTimeMs() {
return finishTimeMs;
}
public ExportFailMsg getFailMsg() {
return failMsg;
}
public Set<String> getExportedTempFiles() {
return this.exportedTempFiles;
}
public String getExportedTempPath() {
return this.exportTempPath;
}
public Set<String> getExportedFiles() {
return this.exportedFiles;
}
public synchronized void addExportedTempFiles(List<String> files) {
exportedTempFiles.addAll(files);
LOG.debug("exported temp files: {}", this.exportedTempFiles);
}
public synchronized void clearExportedTempFiles() {
exportedTempFiles.clear();
}
public synchronized void addExportedFile(String file) {
exportedFiles.add(file);
LOG.debug("exported files: {}", this.exportedFiles);
}
public synchronized Thread getDoExportingThread() {
return doExportingThread;
}
public synchronized void setDoExportingThread(Thread isExportingThread) {
this.doExportingThread = isExportingThread;
}
public List<Coordinator> getCoordList() {
return coordList;
}
public List<TScanRangeLocations> getTabletLocations() {
return tabletLocations;
}
public List<Pair<TNetworkAddress, String>> getSnapshotPaths() {
return this.snapshotPaths;
}
public void addSnapshotPath(Pair<TNetworkAddress, String> snapshotPath) {
this.snapshotPaths.add(snapshotPath);
}
public String getSql() {
return sql;
}
public TableName getTableName() {
return tableName;
}
public synchronized boolean updateState(JobState newState) {
return this.updateState(newState, false, System.currentTimeMillis());
}
public synchronized boolean updateState(JobState newState, boolean isReplay, long stateChangeTime) {
if (isExportDone()) {
LOG.warn("export job state is finished or cancelled");
return false;
}
state = newState;
switch (newState) {
case PENDING:
progress = 0;
break;
case EXPORTING:
startTimeMs = stateChangeTime;
break;
case FINISHED:
case CANCELLED:
finishTimeMs = stateChangeTime;
progress = 100;
break;
default:
Preconditions.checkState(false, "wrong job state: " + newState.name());
break;
}
if (!isReplay) {
GlobalStateMgr.getCurrentState().getEditLog().logExportUpdateState(id, newState, stateChangeTime,
snapshotPaths, exportTempPath, exportedFiles, failMsg);
}
return true;
}
public Status releaseSnapshots() {
switch (exportTable.getType()) {
case OLAP:
case MYSQL:
return releaseSnapshotPaths();
case CLOUD_NATIVE:
return releaseMetadataLocks();
default:
return Status.OK;
}
}
public Status releaseSnapshotPaths() {
List<Pair<TNetworkAddress, String>> snapshotPaths = getSnapshotPaths();
LOG.debug("snapshotPaths:{}", snapshotPaths);
for (Pair<TNetworkAddress, String> snapshotPath : snapshotPaths) {
TNetworkAddress address = snapshotPath.first;
String host = address.getHostname();
int port = address.getPort();
ComputeNode node = GlobalStateMgr.getCurrentSystemInfo().getBackendWithBePort(host, port);
if (node == null) {
if (RunMode.getCurrentRunMode() == RunMode.SHARED_DATA) {
node = GlobalStateMgr.getCurrentSystemInfo().getComputeNodeWithBePort(host, port);
}
if (node == null) {
continue;
}
}
long nodeId = node.getId();
if (!GlobalStateMgr.getCurrentSystemInfo().checkBackendAvailable(nodeId)) {
if (RunMode.getCurrentRunMode() == RunMode.SHARED_NOTHING) {
continue;
}
}
AgentClient client = new AgentClient(host, port);
TAgentResult result = client.releaseSnapshot(snapshotPath.second);
if (result == null || result.getStatus().getStatus_code() != TStatusCode.OK) {
continue;
}
}
snapshotPaths.clear();
return Status.OK;
}
public synchronized boolean isExportDone() {
return state == JobState.FINISHED || state == JobState.CANCELLED;
}
public synchronized void cancel(ExportFailMsg.CancelType type, String msg) throws UserException {
if (isExportDone()) {
throw new UserException("Export job [" + queryId.toString() + "] is already finished or cancelled");
}
cancelInternal(type, msg);
}
public synchronized void cancelInternal(ExportFailMsg.CancelType type, String msg) {
if (isExportDone()) {
LOG.warn("export job state is finished or cancelled");
return;
}
try {
if (msg != null && failMsg.getCancelType() == ExportFailMsg.CancelType.UNKNOWN) {
failMsg = new ExportFailMsg(type, msg);
}
for (Coordinator coord : coordList) {
coord.cancel();
}
try {
if (!brokerDesc.hasBroker()) {
HdfsUtil.deletePath(exportTempPath, brokerDesc);
} else {
BrokerUtil.deletePath(exportTempPath, brokerDesc);
}
LOG.info("remove export temp path success, path: {}", exportTempPath);
} catch (UserException e) {
LOG.warn("remove export temp path fail, path: {}", exportTempPath);
}
for (String exportedFile : exportedFiles) {
try {
if (!brokerDesc.hasBroker()) {
HdfsUtil.deletePath(exportedFile, brokerDesc);
} else {
BrokerUtil.deletePath(exportedFile, brokerDesc);
}
LOG.info("remove exported file success, path: {}", exportedFile);
} catch (UserException e) {
LOG.warn("remove exported file fail, path: {}", exportedFile);
}
}
releaseSnapshots();
} finally {
updateState(ExportJob.JobState.CANCELLED);
LOG.info("export job cancelled. job: {}", this);
}
}
public synchronized void finish() {
if (isExportDone()) {
LOG.warn("export job state is finished or cancelled");
return;
}
try {
releaseSnapshots();
try {
if (!brokerDesc.hasBroker()) {
HdfsUtil.deletePath(exportTempPath, brokerDesc);
} else {
BrokerUtil.deletePath(exportTempPath, brokerDesc);
}
LOG.info("remove export temp path success, path: {}", exportTempPath);
} catch (UserException e) {
LOG.warn("remove export temp path fail, path: {}", exportTempPath);
}
} finally {
updateState(JobState.FINISHED);
LOG.info("export job finished. job: {}", this);
}
}
@Override
public String toString() {
return "ExportJob [jobId=" + id
+ ", dbId=" + dbId
+ ", tableId=" + tableId
+ ", state=" + state
+ ", path=" + exportPath
+ ", partitions=(" + StringUtils.join(partitions, ",") + ")"
+ ", progress=" + progress
+ ", createTimeMs=" + TimeUtils.longToTimeString(createTimeMs)
+ ", exportStartTimeMs=" + TimeUtils.longToTimeString(startTimeMs)
+ ", exportFinishTimeMs=" + TimeUtils.longToTimeString(finishTimeMs)
+ ", failMsg=" + failMsg
+ ", tmp files=(" + StringUtils.join(exportedTempFiles, ",") + ")"
+ ", files=(" + StringUtils.join(exportedFiles, ",") + ")"
+ "]";
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(id);
out.writeLong(dbId);
out.writeLong(tableId);
Text.writeString(out, exportPath);
Text.writeString(out, columnSeparator);
Text.writeString(out, rowDelimiter);
out.writeInt(properties.size());
for (Map.Entry<String, String> property : properties.entrySet()) {
Text.writeString(out, property.getKey());
Text.writeString(out, property.getValue());
}
boolean hasPartition = (partitions != null);
if (hasPartition) {
out.writeBoolean(true);
int partitionSize = partitions.size();
out.writeInt(partitionSize);
for (String partitionName : partitions) {
Text.writeString(out, partitionName);
}
} else {
out.writeBoolean(false);
}
Text.writeString(out, state.name());
out.writeLong(createTimeMs);
out.writeLong(startTimeMs);
out.writeLong(finishTimeMs);
out.writeInt(progress);
failMsg.write(out);
if (brokerDesc == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
brokerDesc.write(out);
}
tableName.write(out);
}
public void readFields(DataInput in) throws IOException {
isReplayed = true;
id = in.readLong();
dbId = in.readLong();
tableId = in.readLong();
exportPath = Text.readString(in);
columnSeparator = Text.readString(in);
rowDelimiter = Text.readString(in);
GlobalStateMgr stateMgr = GlobalStateMgr.getCurrentState();
Database db = null;
if (stateMgr.getMetadata() != null) {
db = stateMgr.getDb(dbId);
}
if (db != null) {
exportTable = db.getTable(tableId);
}
int count = in.readInt();
for (int i = 0; i < count; i++) {
String propertyKey = Text.readString(in);
String propertyValue = Text.readString(in);
this.properties.put(propertyKey, propertyValue);
}
boolean hasPartition = in.readBoolean();
if (hasPartition) {
partitions = Lists.newArrayList();
int partitionSize = in.readInt();
for (int i = 0; i < partitionSize; ++i) {
String partitionName = Text.readString(in);
partitions.add(partitionName);
}
}
state = JobState.valueOf(Text.readString(in));
createTimeMs = in.readLong();
startTimeMs = in.readLong();
finishTimeMs = in.readLong();
progress = in.readInt();
failMsg.readFields(in);
if (in.readBoolean()) {
brokerDesc = BrokerDesc.read(in);
}
tableName = new TableName();
tableName.readFields(in);
}
/**
* For unit tests only.
*/
public void setTableName(TableName tableName) {
this.tableName = tableName;
}
@Override
public int hashCode() {
return Long.hashCode(id);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof ExportJob)) {
return false;
}
ExportJob job = (ExportJob) obj;
return this.id == job.id;
}
public boolean isReplayed() {
return isReplayed;
}
public boolean exportLakeTable() {
return exportTable.isCloudNativeTableOrMaterializedView();
}
public boolean exportOlapTable() {
return exportTable.isOlapTable();
}
public enum JobState {
PENDING,
EXPORTING,
FINISHED,
CANCELLED,
}
@Override
public void gsonPostProcess() throws IOException {
if (!Strings.isNullOrEmpty(queryIdString)) {
queryId = UUID.fromString(queryIdString);
}
isReplayed = true;
GlobalStateMgr stateMgr = GlobalStateMgr.getCurrentState();
Database db = null;
if (stateMgr.getMetadata() != null) {
db = stateMgr.getDb(dbId);
}
if (db != null) {
exportTable = db.getTable(tableId);
}
}
public static class StateTransfer implements Writable {
long jobId;
JobState state;
public StateTransfer() {
this.jobId = -1;
this.state = JobState.CANCELLED;
}
public StateTransfer(long jobId, JobState state) {
this.jobId = jobId;
this.state = state;
}
public long getJobId() {
return jobId;
}
public JobState getState() {
return state;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(jobId);
Text.writeString(out, state.name());
}
public void readFields(DataInput in) throws IOException {
jobId = in.readLong();
state = JobState.valueOf(Text.readString(in));
}
}
public static class ExportUpdateInfo implements Writable {
@SerializedName("jobId")
long jobId;
@SerializedName("state")
JobState state;
@SerializedName("stateChangeTime")
long stateChangeTime;
@SerializedName("snapshotPaths")
List<Pair<NetworkAddress, String>> snapshotPaths;
@SerializedName("exportTempPath")
String exportTempPath;
@SerializedName("exportedFiles")
Set<String> exportedFiles;
@SerializedName("failMsg")
ExportFailMsg failMsg;
public ExportUpdateInfo() {
this.jobId = -1;
this.state = JobState.CANCELLED;
this.snapshotPaths = Lists.newArrayList();
this.exportTempPath = "";
this.exportedFiles = Sets.newConcurrentHashSet();
this.failMsg = new ExportFailMsg();
}
public ExportUpdateInfo(long jobId, JobState state, long stateChangeTime,
List<Pair<TNetworkAddress, String>> snapshotPaths,
String exportTempPath, Set<String> exportedFiles, ExportFailMsg failMsg) {
this.jobId = jobId;
this.state = state;
this.stateChangeTime = stateChangeTime;
this.snapshotPaths = serialize(snapshotPaths);
this.exportTempPath = exportTempPath;
this.exportedFiles = exportedFiles;
this.failMsg = failMsg;
}
@Override
public void write(DataOutput out) throws IOException {
String json = GsonUtils.GSON.toJson(this, ExportUpdateInfo.class);
Text.writeString(out, json);
out.writeInt(snapshotPaths.size());
for (Pair<NetworkAddress, String> entry : snapshotPaths) {
Text.writeString(out, entry.first.hostname);
out.writeInt(entry.first.port);
Text.writeString(out, entry.second);
}
}
public static ExportUpdateInfo read(DataInput input) throws IOException {
ExportUpdateInfo info = GsonUtils.GSON.fromJson(Text.readString(input), ExportUpdateInfo.class);
int snapshotPathsLen = input.readInt();
for (int i = 0; i < snapshotPathsLen; i++) {
String hostName = Text.readString(input);
int port = input.readInt();
String path = Text.readString(input);
Pair<NetworkAddress, String> entry = Pair.create(new NetworkAddress(hostName, port), path);
info.snapshotPaths.set(i, entry);
}
return info;
}
public List<Pair<NetworkAddress, String>> serialize(List<Pair<TNetworkAddress, String>> snapshotPaths) {
return snapshotPaths
.stream()
.map(snapshotPath
-> Pair.create(new NetworkAddress(snapshotPath.first.hostname, snapshotPath.first.port),
snapshotPath.second))
.collect(Collectors.toList());
}
public List<Pair<TNetworkAddress, String>> deserialize(List<Pair<NetworkAddress, String>> snapshotPaths) {
return snapshotPaths
.stream()
.map(snapshotPath
-> Pair.create(new TNetworkAddress(snapshotPath.first.hostname, snapshotPath.first.port),
snapshotPath.second))
.collect(Collectors.toList());
}
}
public static class NetworkAddress {
@SerializedName("h")
String hostname;
@SerializedName("p")
int port;
public NetworkAddress() {
}
public NetworkAddress(String hostname, int port) {
this.hostname = hostname;
this.port = port;
}
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
@Override
public boolean equals(Object obj) {
return obj instanceof NetworkAddress
&& this.hostname.equals(((NetworkAddress) obj).hostname)
&& this.port == ((NetworkAddress) obj).port;
}
@Override
public int hashCode() {
return Objects.hash(hostname, port);
}
@Override
public String toString() {
return hostname + ":" + port;
}
}
}
|
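ExportJob.plan() above packs tablets into coordinator tasks with a multi-pass greedy loop: a pass admits a tablet only while its backend's already-assigned bytes stay under export_max_bytes_per_be_per_task, and whatever remains seeds the next pass (the next task). A minimal sketch of that packing under the same admission test; the Tablet record and the values in main are hypothetical.
```java
import java.util.*;

// Hypothetical sketch of the multi-pass packing in ExportJob.plan(): each pass
// admits items until a backend's byte budget for that pass is exhausted; the
// leftovers seed the next pass (i.e. the next coordinator/task).
public final class TabletPacker {
    record Tablet(long backendId, long bytes) {}

    static List<List<Tablet>> pack(List<Tablet> tablets, long maxBytesPerBePerTask) {
        List<List<Tablet>> tasks = new ArrayList<>();
        List<Tablet> remaining = new ArrayList<>(tablets);
        while (!remaining.isEmpty()) {
            Map<Long, Long> bytesPerBe = new HashMap<>();
            List<Tablet> task = new ArrayList<>();
            Iterator<Tablet> it = remaining.iterator();
            while (it.hasNext()) {
                Tablet t = it.next();
                long assigned = bytesPerBe.getOrDefault(t.backendId(), 0L);
                if (assigned < maxBytesPerBePerTask) {   // same admission test as plan()
                    task.add(t);
                    bytesPerBe.put(t.backendId(), assigned + t.bytes());
                    it.remove();
                }
            }
            tasks.add(task);
        }
        return tasks;
    }

    public static void main(String[] args) {
        List<Tablet> tablets = List.of(
                new Tablet(1, 60), new Tablet(1, 60), new Tablet(2, 30), new Tablet(1, 60));
        // budget 100 per backend per task: backend 1 fits two tablets per pass
        pack(tablets, 100).forEach(task -> System.out.println(task));
    }
}
```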
class ExportJob implements Writable, GsonPostProcessable {
private static final Logger LOG = LogManager.getLogger(ExportJob.class);
private final DescriptorTable desc;
private final Set<String> exportedTempFiles = Sets.newConcurrentHashSet();
private Set<String> exportedFiles = Sets.newConcurrentHashSet();
private final Analyzer analyzer;
private final List<Coordinator> coordList = Lists.newArrayList();
private final AtomicInteger nextId = new AtomicInteger(0);
private List<Pair<TNetworkAddress, String>> snapshotPaths = Lists.newArrayList();
private final Map<Long, Long> beLastStartTime = Maps.newHashMap();
@SerializedName("id")
private long id;
private UUID queryId;
@SerializedName("qd")
private String queryIdString;
@SerializedName("dd")
private long dbId;
@SerializedName("td")
private long tableId;
@SerializedName("bd")
private BrokerDesc brokerDesc;
@SerializedName("ep")
private String exportPath;
private String exportTempPath;
private String fileNamePrefix;
@SerializedName("cs")
private String columnSeparator;
@SerializedName("rd")
private String rowDelimiter;
private boolean includeQueryId;
@SerializedName("pt")
private Map<String, String> properties = Maps.newHashMap();
@SerializedName("ps")
private List<String> partitions;
@SerializedName("tn")
private TableName tableName;
private List<String> columnNames;
private String sql = "";
@SerializedName("se")
private JobState state;
@SerializedName("ct")
private long createTimeMs;
@SerializedName("st")
private long startTimeMs;
@SerializedName("ft")
private long finishTimeMs;
@SerializedName("pg")
private int progress;
@SerializedName("fm")
private ExportFailMsg failMsg;
private TupleDescriptor exportTupleDesc;
private Table exportTable;
private boolean isReplayed = false;
private Thread doExportingThread;
private List<TScanRangeLocations> tabletLocations = Lists.newArrayList();
public ExportJob() {
this.id = -1;
this.queryId = null;
this.dbId = -1;
this.tableId = -1;
this.state = JobState.PENDING;
this.progress = 0;
this.createTimeMs = System.currentTimeMillis();
this.startTimeMs = -1;
this.finishTimeMs = -1;
this.failMsg = new ExportFailMsg(ExportFailMsg.CancelType.UNKNOWN, "");
this.analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), null);
this.desc = analyzer.getDescTbl();
this.exportPath = "";
this.exportTempPath = "";
this.fileNamePrefix = "";
this.columnSeparator = "\t";
this.rowDelimiter = "\n";
this.includeQueryId = true;
}
public ExportJob(long jobId, UUID queryId) {
this();
this.id = jobId;
this.queryId = queryId;
this.queryIdString = queryId.toString();
}
public void setJob(ExportStmt stmt) throws UserException {
String dbName = stmt.getTblName().getDb();
Database db = GlobalStateMgr.getCurrentState().getDb(dbName);
if (db == null) {
throw new DdlException("Database " + dbName + " does not exist");
}
this.brokerDesc = stmt.getBrokerDesc();
Preconditions.checkNotNull(brokerDesc);
this.columnSeparator = stmt.getColumnSeparator();
this.rowDelimiter = stmt.getRowDelimiter();
this.includeQueryId = stmt.isIncludeQueryId();
this.properties = stmt.getProperties();
exportPath = stmt.getPath();
Preconditions.checkArgument(!Strings.isNullOrEmpty(exportPath));
exportTempPath = this.exportPath + "__starrocks_export_tmp_" + queryId.toString();
fileNamePrefix = stmt.getFileNamePrefix();
Preconditions.checkArgument(!Strings.isNullOrEmpty(fileNamePrefix));
if (includeQueryId) {
fileNamePrefix += queryId.toString() + "_";
}
this.partitions = stmt.getPartitions();
this.columnNames = stmt.getColumnNames();
db.readLock();
try {
this.dbId = db.getId();
this.exportTable = db.getTable(stmt.getTblName().getTbl());
if (exportTable == null) {
throw new DdlException("Table " + stmt.getTblName().getTbl() + " does not exist");
}
this.tableId = exportTable.getId();
this.tableName = stmt.getTblName();
genExecFragment(stmt);
} finally {
db.readUnlock();
}
this.sql = stmt.toSql();
}
private void genExecFragment(ExportStmt stmt) throws UserException {
registerToDesc();
plan(stmt);
}
private void registerToDesc() throws UserException {
TableRef ref = new TableRef(tableName, null, partitions == null ? null : new PartitionNames(false, partitions));
BaseTableRef tableRef = new BaseTableRef(ref, exportTable, tableName);
exportTupleDesc = desc.createTupleDescriptor();
exportTupleDesc.setTable(exportTable);
exportTupleDesc.setRef(tableRef);
Map<String, Column> nameToColumn = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
List<Column> tableColumns = exportTable.getBaseSchema();
List<Column> exportColumns = Lists.newArrayList();
for (Column column : tableColumns) {
nameToColumn.put(column.getName(), column);
}
if (columnNames == null) {
exportColumns.addAll(tableColumns);
} else {
for (String columnName : columnNames) {
if (!nameToColumn.containsKey(columnName)) {
throw new UserException("Column [" + columnName + "] does not exist in table.");
}
exportColumns.add(nameToColumn.get(columnName));
}
}
for (Column col : exportColumns) {
SlotDescriptor slot = desc.addSlotDescriptor(exportTupleDesc);
slot.setIsMaterialized(true);
slot.setColumn(col);
slot.setIsNullable(col.isAllowNull());
}
desc.computeMemLayout();
}
private void plan(ExportStmt stmt) throws UserException {
List<PlanFragment> fragments = Lists.newArrayList();
List<ScanNode> scanNodes = Lists.newArrayList();
ScanNode scanNode = genScanNode();
tabletLocations = scanNode.getScanRangeLocations(0);
if (tabletLocations == null) {
PlanFragment fragment = genPlanFragment(exportTable.getType(), scanNode, 0);
scanNodes.add(scanNode);
fragments.add(fragment);
} else {
for (TScanRangeLocations tablet : tabletLocations) {
List<TScanRangeLocation> locations = tablet.getLocations();
Collections.shuffle(locations);
tablet.setLocations(locations.subList(0, 1));
}
long maxBytesPerBe = Config.export_max_bytes_per_be_per_task;
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentInvertedIndex();
List<TScanRangeLocations> copyTabletLocations = Lists.newArrayList(tabletLocations);
int taskIdx = 0;
while (!copyTabletLocations.isEmpty()) {
Map<Long, Long> bytesPerBe = Maps.newHashMap();
List<TScanRangeLocations> taskTabletLocations = Lists.newArrayList();
Iterator<TScanRangeLocations> iter = copyTabletLocations.iterator();
while (iter.hasNext()) {
TScanRangeLocations scanRangeLocations = iter.next();
long tabletId = scanRangeLocations.getScan_range().getInternal_scan_range().getTablet_id();
long backendId = scanRangeLocations.getLocations().get(0).getBackend_id();
Replica replica = invertedIndex.getReplica(tabletId, backendId);
long dataSize = replica != null ? replica.getDataSize() : 0L;
Long assignedBytes = bytesPerBe.get(backendId);
if (assignedBytes == null || assignedBytes < maxBytesPerBe) {
taskTabletLocations.add(scanRangeLocations);
bytesPerBe.put(backendId, assignedBytes != null ? assignedBytes + dataSize : dataSize);
iter.remove();
}
}
OlapScanNode taskScanNode = genOlapScanNodeByLocation(taskTabletLocations);
scanNodes.add(taskScanNode);
PlanFragment fragment = genPlanFragment(exportTable.getType(), taskScanNode, taskIdx++);
fragments.add(fragment);
}
LOG.info("total {} tablets of export job {}, and assign them to {} coordinators",
tabletLocations.size(), id, fragments.size());
}
genCoordinators(stmt, fragments, scanNodes);
}
private ScanNode genScanNode() throws UserException {
ScanNode scanNode = null;
switch (exportTable.getType()) {
case OLAP:
case CLOUD_NATIVE:
scanNode = new OlapScanNode(new PlanNodeId(0), exportTupleDesc, "OlapScanNodeForExport");
scanNode.setColumnFilters(Maps.newHashMap());
((OlapScanNode) scanNode).setIsPreAggregation(false, "This an export operation");
((OlapScanNode) scanNode).setCanTurnOnPreAggr(false);
scanNode.init(analyzer);
((OlapScanNode) scanNode).selectBestRollupByRollupSelector();
break;
case MYSQL:
scanNode = new MysqlScanNode(new PlanNodeId(0), exportTupleDesc, (MysqlTable) this.exportTable);
break;
default:
throw new UserException("Unsupported table type: " + exportTable.getType());
}
scanNode.finalizeStats(analyzer);
return scanNode;
}
private OlapScanNode genOlapScanNodeByLocation(List<TScanRangeLocations> locations) {
return OlapScanNode.createOlapScanNodeByLocation(
new PlanNodeId(nextId.getAndIncrement()),
exportTupleDesc,
"OlapScanNodeForExport",
locations);
}
private PlanFragment genPlanFragment(Table.TableType type, ScanNode scanNode, int taskIdx) throws UserException {
PlanFragment fragment = null;
switch (exportTable.getType()) {
case OLAP:
case CLOUD_NATIVE:
fragment = new PlanFragment(
new PlanFragmentId(nextId.getAndIncrement()), scanNode, DataPartition.RANDOM);
break;
case MYSQL:
fragment = new PlanFragment(
new PlanFragmentId(nextId.getAndIncrement()), scanNode, DataPartition.UNPARTITIONED);
break;
default:
break;
}
if (fragment == null) {
throw new UserException("invalid table type:" + exportTable.getType());
}
fragment.setOutputExprs(createOutputExprs());
scanNode.setFragmentId(fragment.getFragmentId());
THdfsProperties hdfsProperties = new THdfsProperties();
if (!brokerDesc.hasBroker()) {
HdfsUtil.getTProperties(exportTempPath, brokerDesc, hdfsProperties);
}
fragment.setSink(new ExportSink(exportTempPath, fileNamePrefix + taskIdx + "_", columnSeparator,
rowDelimiter, brokerDesc, hdfsProperties));
try {
fragment.createDataSink(TResultSinkType.MYSQL_PROTOCAL);
} catch (Exception e) {
LOG.info("Fragment finalize failed. e=", e);
throw new UserException("Fragment finalize failed");
}
return fragment;
}
private List<Expr> createOutputExprs() {
List<Expr> outputExprs = Lists.newArrayList();
for (int i = 0; i < exportTupleDesc.getSlots().size(); ++i) {
SlotDescriptor slotDesc = exportTupleDesc.getSlots().get(i);
SlotRef slotRef = new SlotRef(slotDesc);
if (slotDesc.getType().getPrimitiveType() == PrimitiveType.CHAR) {
slotRef.setType(Type.CHAR);
}
outputExprs.add(slotRef);
}
return outputExprs;
}
private Coordinator.Factory getCoordinatorFactory() {
return new DefaultCoordinator.Factory();
}
private void genCoordinators(ExportStmt stmt, List<PlanFragment> fragments, List<ScanNode> nodes) {
UUID uuid = UUID.randomUUID();
for (int i = 0; i < fragments.size(); ++i) {
PlanFragment fragment = fragments.get(i);
ScanNode scanNode = nodes.get(i);
TUniqueId queryId = new TUniqueId(uuid.getMostSignificantBits() + i, uuid.getLeastSignificantBits());
Coordinator coord = getCoordinatorFactory().createBrokerExportScheduler(
id, queryId, desc, Lists.newArrayList(fragment), Lists.newArrayList(scanNode),
TimeUtils.DEFAULT_TIME_ZONE, stmt.getExportStartTime(), Maps.newHashMap(), getMemLimit());
this.coordList.add(coord);
LOG.info("split export job to tasks. job id: {}, job query id: {}, task idx: {}, task query id: {}",
id, DebugUtil.printId(this.queryId), i, DebugUtil.printId(queryId));
}
LOG.info("create {} coordintors for export job: {}", coordList.size(), id);
}
public Coordinator resetCoord(int taskIndex, TUniqueId newQueryId) throws UserException {
Coordinator coord = coordList.get(taskIndex);
OlapScanNode olapScanNode = (OlapScanNode) coord.getScanNodes().get(0);
List<TScanRangeLocations> locations = olapScanNode.getScanRangeLocations(0);
if (locations.size() == 0) {
throw new UserException("SubExportTask " + taskIndex + " scan range is empty");
}
OlapScanNode newOlapScanNode = new OlapScanNode(new PlanNodeId(0), exportTupleDesc, "OlapScanNodeForExport");
Analyzer tmpAnalyzer = new Analyzer(GlobalStateMgr.getCurrentState(), null);
newOlapScanNode.setColumnFilters(Maps.newHashMap());
newOlapScanNode.setIsPreAggregation(false, "This is an export operation");
newOlapScanNode.setCanTurnOnPreAggr(false);
newOlapScanNode.init(tmpAnalyzer);
newOlapScanNode.selectBestRollupByRollupSelector();
List<TScanRangeLocations> newLocations = newOlapScanNode.updateScanRangeLocations(locations);
for (TScanRangeLocations tablet : newLocations) {
List<TScanRangeLocation> tabletLocations = tablet.getLocations();
Collections.shuffle(tabletLocations);
tablet.setLocations(tabletLocations.subList(0, 1));
}
OlapScanNode newTaskScanNode = genOlapScanNodeByLocation(newLocations);
PlanFragment newFragment = genPlanFragment(exportTable.getType(), newTaskScanNode, taskIndex);
Coordinator newCoord = getCoordinatorFactory().createBrokerExportScheduler(
id, newQueryId, desc, Lists.newArrayList(newFragment), Lists.newArrayList(newTaskScanNode),
TimeUtils.DEFAULT_TIME_ZONE, coord.getStartTimeMs(), Maps.newHashMap(), getMemLimit());
this.coordList.set(taskIndex, newCoord);
LOG.info("reset coordinator for export job: {}, taskIdx: {}", id, taskIndex);
return newCoord;
}
public boolean needResetCoord() {
return exportTable.isOlapTable();
}
public void setSnapshotPaths(List<Pair<TNetworkAddress, String>> snapshotPaths) {
this.snapshotPaths = snapshotPaths;
}
public void setExportTempPath(String exportTempPath) {
this.exportTempPath = exportTempPath;
}
public void setExportedFiles(Set<String> exportedFiles) {
this.exportedFiles = exportedFiles;
}
public void setBeStartTime(long beId, long lastStartTime) {
this.beLastStartTime.put(beId, lastStartTime);
}
public void setFailMsg(ExportFailMsg failMsg) {
this.failMsg = failMsg;
}
public Map<Long, Long> getBeStartTimeMap() {
return this.beLastStartTime;
}
public long getId() {
return id;
}
public UUID getQueryId() {
return queryId;
}
public long getDbId() {
return dbId;
}
public long getTableId() {
return this.tableId;
}
public JobState getState() {
return state;
}
public BrokerDesc getBrokerDesc() {
return brokerDesc;
}
public void setBrokerDesc(BrokerDesc brokerDesc) {
this.brokerDesc = brokerDesc;
}
public String getExportPath() {
return exportPath;
}
public String getColumnSeparator() {
return this.columnSeparator;
}
public String getRowDelimiter() {
return this.rowDelimiter;
}
public long getMemLimit() {
if (properties.containsKey(LoadStmt.LOAD_MEM_LIMIT)) {
return Long.parseLong(properties.get(LoadStmt.LOAD_MEM_LIMIT));
} else {
return 0;
}
}
public int getTimeoutSecond() {
if (properties.containsKey(LoadStmt.TIMEOUT_PROPERTY)) {
return Integer.parseInt(properties.get(LoadStmt.TIMEOUT_PROPERTY));
} else {
return Config.export_task_default_timeout_second;
}
}
public List<String> getPartitions() {
return partitions;
}
public List<String> getColumnNames() {
return columnNames;
}
public synchronized int getProgress() {
return progress;
}
public synchronized void setProgress(int progress) {
this.progress = progress;
}
public long getCreateTimeMs() {
return createTimeMs;
}
public long getStartTimeMs() {
return startTimeMs;
}
public long getFinishTimeMs() {
return finishTimeMs;
}
public ExportFailMsg getFailMsg() {
return failMsg;
}
public Set<String> getExportedTempFiles() {
return this.exportedTempFiles;
}
public String getExportedTempPath() {
return this.exportTempPath;
}
public Set<String> getExportedFiles() {
return this.exportedFiles;
}
public synchronized void addExportedTempFiles(List<String> files) {
exportedTempFiles.addAll(files);
LOG.debug("exported temp files: {}", this.exportedTempFiles);
}
public synchronized void clearExportedTempFiles() {
exportedTempFiles.clear();
}
public synchronized void addExportedFile(String file) {
exportedFiles.add(file);
LOG.debug("exported files: {}", this.exportedFiles);
}
public synchronized Thread getDoExportingThread() {
return doExportingThread;
}
public synchronized void setDoExportingThread(Thread isExportingThread) {
this.doExportingThread = isExportingThread;
}
public List<Coordinator> getCoordList() {
return coordList;
}
public List<TScanRangeLocations> getTabletLocations() {
return tabletLocations;
}
public List<Pair<TNetworkAddress, String>> getSnapshotPaths() {
return this.snapshotPaths;
}
public void addSnapshotPath(Pair<TNetworkAddress, String> snapshotPath) {
this.snapshotPaths.add(snapshotPath);
}
public String getSql() {
return sql;
}
public TableName getTableName() {
return tableName;
}
public synchronized boolean updateState(JobState newState) {
return this.updateState(newState, false, System.currentTimeMillis());
}
public synchronized boolean updateState(JobState newState, boolean isReplay, long stateChangeTime) {
if (isExportDone()) {
LOG.warn("export job state is finished or cancelled");
return false;
}
state = newState;
switch (newState) {
case PENDING:
progress = 0;
break;
case EXPORTING:
startTimeMs = stateChangeTime;
break;
case FINISHED:
case CANCELLED:
finishTimeMs = stateChangeTime;
progress = 100;
break;
default:
Preconditions.checkState(false, "wrong job state: " + newState.name());
break;
}
if (!isReplay) {
GlobalStateMgr.getCurrentState().getEditLog().logExportUpdateState(id, newState, stateChangeTime,
snapshotPaths, exportTempPath, exportedFiles, failMsg);
}
return true;
}
public Status releaseSnapshots() {
switch (exportTable.getType()) {
case OLAP:
case MYSQL:
return releaseSnapshotPaths();
case CLOUD_NATIVE:
return releaseMetadataLocks();
default:
return Status.OK;
}
}
public Status releaseSnapshotPaths() {
List<Pair<TNetworkAddress, String>> snapshotPaths = getSnapshotPaths();
LOG.debug("snapshotPaths:{}", snapshotPaths);
for (Pair<TNetworkAddress, String> snapshotPath : snapshotPaths) {
TNetworkAddress address = snapshotPath.first;
String host = address.getHostname();
int port = address.getPort();
Backend backend = GlobalStateMgr.getCurrentSystemInfo().getBackendWithBePort(host, port);
if (backend == null) {
continue;
}
long backendId = backend.getId();
if (!GlobalStateMgr.getCurrentSystemInfo().checkBackendAvailable(backendId)) {
continue;
}
AgentClient client = new AgentClient(host, port);
TAgentResult result = client.releaseSnapshot(snapshotPath.second);
if (result == null || result.getStatus().getStatus_code() != TStatusCode.OK) {
continue;
}
}
snapshotPaths.clear();
return Status.OK;
}
public synchronized boolean isExportDone() {
return state == JobState.FINISHED || state == JobState.CANCELLED;
}
public synchronized void cancel(ExportFailMsg.CancelType type, String msg) throws UserException {
if (isExportDone()) {
throw new UserException("Export job [" + queryId.toString() + "] is already finished or cancelled");
}
cancelInternal(type, msg);
}
public synchronized void cancelInternal(ExportFailMsg.CancelType type, String msg) {
if (isExportDone()) {
LOG.warn("export job state is finished or cancelled");
return;
}
try {
if (msg != null && failMsg.getCancelType() == ExportFailMsg.CancelType.UNKNOWN) {
failMsg = new ExportFailMsg(type, msg);
}
for (Coordinator coord : coordList) {
coord.cancel();
}
try {
if (!brokerDesc.hasBroker()) {
HdfsUtil.deletePath(exportTempPath, brokerDesc);
} else {
BrokerUtil.deletePath(exportTempPath, brokerDesc);
}
LOG.info("remove export temp path success, path: {}", exportTempPath);
} catch (UserException e) {
LOG.warn("remove export temp path fail, path: {}", exportTempPath);
}
for (String exportedFile : exportedFiles) {
try {
if (!brokerDesc.hasBroker()) {
HdfsUtil.deletePath(exportedFile, brokerDesc);
} else {
BrokerUtil.deletePath(exportedFile, brokerDesc);
}
LOG.info("remove exported file success, path: {}", exportedFile);
} catch (UserException e) {
LOG.warn("remove exported file fail, path: {}", exportedFile);
}
}
releaseSnapshots();
} finally {
updateState(ExportJob.JobState.CANCELLED);
LOG.info("export job cancelled. job: {}", this);
}
}
public synchronized void finish() {
if (isExportDone()) {
LOG.warn("export job state is finished or cancelled");
return;
}
try {
releaseSnapshots();
try {
if (!brokerDesc.hasBroker()) {
HdfsUtil.deletePath(exportTempPath, brokerDesc);
} else {
BrokerUtil.deletePath(exportTempPath, brokerDesc);
}
LOG.info("remove export temp path success, path: {}", exportTempPath);
} catch (UserException e) {
LOG.warn("remove export temp path fail, path: {}", exportTempPath);
}
} finally {
updateState(JobState.FINISHED);
LOG.info("export job finished. job: {}", this);
}
}
@Override
public String toString() {
return "ExportJob [jobId=" + id
+ ", dbId=" + dbId
+ ", tableId=" + tableId
+ ", state=" + state
+ ", path=" + exportPath
+ ", partitions=(" + StringUtils.join(partitions, ",") + ")"
+ ", progress=" + progress
+ ", createTimeMs=" + TimeUtils.longToTimeString(createTimeMs)
+ ", exportStartTimeMs=" + TimeUtils.longToTimeString(startTimeMs)
+ ", exportFinishTimeMs=" + TimeUtils.longToTimeString(finishTimeMs)
+ ", failMsg=" + failMsg
+ ", tmp files=(" + StringUtils.join(exportedTempFiles, ",") + ")"
+ ", files=(" + StringUtils.join(exportedFiles, ",") + ")"
+ "]";
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(id);
out.writeLong(dbId);
out.writeLong(tableId);
Text.writeString(out, exportPath);
Text.writeString(out, columnSeparator);
Text.writeString(out, rowDelimiter);
out.writeInt(properties.size());
for (Map.Entry<String, String> property : properties.entrySet()) {
Text.writeString(out, property.getKey());
Text.writeString(out, property.getValue());
}
boolean hasPartition = (partitions != null);
if (hasPartition) {
out.writeBoolean(true);
int partitionSize = partitions.size();
out.writeInt(partitionSize);
for (String partitionName : partitions) {
Text.writeString(out, partitionName);
}
} else {
out.writeBoolean(false);
}
Text.writeString(out, state.name());
out.writeLong(createTimeMs);
out.writeLong(startTimeMs);
out.writeLong(finishTimeMs);
out.writeInt(progress);
failMsg.write(out);
if (brokerDesc == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
brokerDesc.write(out);
}
tableName.write(out);
}
public void readFields(DataInput in) throws IOException {
isReplayed = true;
id = in.readLong();
dbId = in.readLong();
tableId = in.readLong();
exportPath = Text.readString(in);
columnSeparator = Text.readString(in);
rowDelimiter = Text.readString(in);
GlobalStateMgr stateMgr = GlobalStateMgr.getCurrentState();
Database db = null;
if (stateMgr.getMetadata() != null) {
db = stateMgr.getDb(dbId);
}
if (db != null) {
exportTable = db.getTable(tableId);
}
int count = in.readInt();
for (int i = 0; i < count; i++) {
String propertyKey = Text.readString(in);
String propertyValue = Text.readString(in);
this.properties.put(propertyKey, propertyValue);
}
boolean hasPartition = in.readBoolean();
if (hasPartition) {
partitions = Lists.newArrayList();
int partitionSize = in.readInt();
for (int i = 0; i < partitionSize; ++i) {
String partitionName = Text.readString(in);
partitions.add(partitionName);
}
}
state = JobState.valueOf(Text.readString(in));
createTimeMs = in.readLong();
startTimeMs = in.readLong();
finishTimeMs = in.readLong();
progress = in.readInt();
failMsg.readFields(in);
if (in.readBoolean()) {
brokerDesc = BrokerDesc.read(in);
}
tableName = new TableName();
tableName.readFields(in);
}
/**
* For unit tests only.
*/
public void setTableName(TableName tableName) {
this.tableName = tableName;
}
@Override
public int hashCode() {
return Long.hashCode(id);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof ExportJob)) {
return false;
}
ExportJob job = (ExportJob) obj;
return this.id == job.id;
}
public boolean isReplayed() {
return isReplayed;
}
public boolean exportLakeTable() {
return exportTable.isCloudNativeTableOrMaterializedView();
}
public boolean exportOlapTable() {
return exportTable.isOlapTable();
}
public enum JobState {
PENDING,
EXPORTING,
FINISHED,
CANCELLED,
}
@Override
public void gsonPostProcess() throws IOException {
if (!Strings.isNullOrEmpty(queryIdString)) {
queryId = UUID.fromString(queryIdString);
}
isReplayed = true;
GlobalStateMgr stateMgr = GlobalStateMgr.getCurrentState();
Database db = null;
if (stateMgr.getMetadata() != null) {
db = stateMgr.getDb(dbId);
}
if (db != null) {
exportTable = db.getTable(tableId);
}
}
public static class StateTransfer implements Writable {
long jobId;
JobState state;
public StateTransfer() {
this.jobId = -1;
this.state = JobState.CANCELLED;
}
public StateTransfer(long jobId, JobState state) {
this.jobId = jobId;
this.state = state;
}
public long getJobId() {
return jobId;
}
public JobState getState() {
return state;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(jobId);
Text.writeString(out, state.name());
}
public void readFields(DataInput in) throws IOException {
jobId = in.readLong();
state = JobState.valueOf(Text.readString(in));
}
}
public static class ExportUpdateInfo implements Writable {
@SerializedName("jobId")
long jobId;
@SerializedName("state")
JobState state;
@SerializedName("stateChangeTime")
long stateChangeTime;
@SerializedName("snapshotPaths")
List<Pair<NetworkAddress, String>> snapshotPaths;
@SerializedName("exportTempPath")
String exportTempPath;
@SerializedName("exportedFiles")
Set<String> exportedFiles;
@SerializedName("failMsg")
ExportFailMsg failMsg;
public ExportUpdateInfo() {
this.jobId = -1;
this.state = JobState.CANCELLED;
this.snapshotPaths = Lists.newArrayList();
this.exportTempPath = "";
this.exportedFiles = Sets.newConcurrentHashSet();
this.failMsg = new ExportFailMsg();
}
public ExportUpdateInfo(long jobId, JobState state, long stateChangeTime,
List<Pair<TNetworkAddress, String>> snapshotPaths,
String exportTempPath, Set<String> exportedFiles, ExportFailMsg failMsg) {
this.jobId = jobId;
this.state = state;
this.stateChangeTime = stateChangeTime;
this.snapshotPaths = serialize(snapshotPaths);
this.exportTempPath = exportTempPath;
this.exportedFiles = exportedFiles;
this.failMsg = failMsg;
}
@Override
public void write(DataOutput out) throws IOException {
String json = GsonUtils.GSON.toJson(this, ExportUpdateInfo.class);
Text.writeString(out, json);
out.writeInt(snapshotPaths.size());
for (Pair<NetworkAddress, String> entry : snapshotPaths) {
Text.writeString(out, entry.first.hostname);
out.writeInt(entry.first.port);
Text.writeString(out, entry.second);
}
}
public static ExportUpdateInfo read(DataInput input) throws IOException {
ExportUpdateInfo info = GsonUtils.GSON.fromJson(Text.readString(input), ExportUpdateInfo.class);
int snapshotPathsLen = input.readInt();
for (int i = 0; i < snapshotPathsLen; i++) {
String hostName = Text.readString(input);
int port = input.readInt();
String path = Text.readString(input);
Pair<NetworkAddress, String> entry = Pair.create(new NetworkAddress(hostName, port), path);
info.snapshotPaths.set(i, entry);
}
return info;
}
public List<Pair<NetworkAddress, String>> serialize(List<Pair<TNetworkAddress, String>> snapshotPaths) {
return snapshotPaths
.stream()
.map(snapshotPath
-> Pair.create(new NetworkAddress(snapshotPath.first.hostname, snapshotPath.first.port),
snapshotPath.second))
.collect(Collectors.toList());
}
public List<Pair<TNetworkAddress, String>> deserialize(List<Pair<NetworkAddress, String>> snapshotPaths) {
return snapshotPaths
.stream()
.map(snapshotPath
-> Pair.create(new TNetworkAddress(snapshotPath.first.hostname, snapshotPath.first.port),
snapshotPath.second))
.collect(Collectors.toList());
}
}
public static class NetworkAddress {
@SerializedName("h")
String hostname;
@SerializedName("p")
int port;
public NetworkAddress() {
}
public NetworkAddress(String hostname, int port) {
this.hostname = hostname;
this.port = port;
}
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
@Override
public boolean equals(Object obj) {
return obj instanceof NetworkAddress
&& this.hostname.equals(((NetworkAddress) obj).hostname)
&& this.port == ((NetworkAddress) obj).port;
}
@Override
public int hashCode() {
return Objects.hash(hostname, port);
}
@Override
public String toString() {
return hostname + ":" + port;
}
}
}
|
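updateState() in the ExportJob above is a small journaled state machine: transitions out of FINISHED/CANCELLED are rejected, each target state stamps its own timestamp, and the edit-log write is skipped during replay so recovery does not re-journal. A minimal sketch of that shape; the Journal interface below is a hypothetical stand-in for the edit log, not the StarRocks API.
```java
// Hypothetical sketch of the updateState() pattern: reject transitions out of
// terminal states, stamp times per target state, and journal only when not replaying.
public final class JobStateMachine {
    enum State { PENDING, EXPORTING, FINISHED, CANCELLED }
    interface Journal { void log(State newState, long when); }

    private State state = State.PENDING;
    private long startTimeMs = -1;
    private long finishTimeMs = -1;

    synchronized boolean transition(State newState, boolean isReplay, long when, Journal journal) {
        if (state == State.FINISHED || state == State.CANCELLED) {
            return false;                      // terminal: mirrors the isExportDone() guard
        }
        state = newState;
        switch (newState) {
            case EXPORTING -> startTimeMs = when;
            case FINISHED, CANCELLED -> finishTimeMs = when;
            default -> { }                     // PENDING: nothing to stamp in this sketch
        }
        if (!isReplay) {
            journal.log(newState, when);       // edit-log write is skipped during replay
        }
        return true;
    }

    public static void main(String[] args) {
        JobStateMachine m = new JobStateMachine();
        Journal j = (s, t) -> System.out.println("journal: " + s + "@" + t);
        System.out.println(m.transition(State.EXPORTING, false, 1L, j)); // true
        System.out.println(m.transition(State.FINISHED, false, 2L, j));  // true
        System.out.println(m.transition(State.PENDING, false, 3L, j));   // false: terminal
    }
}
```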
We can call the other constructor directly: `this(className);`
|
public HiveFunctionWrapper(String className, UDFType serializableInstance) {
this.className = className;
Preconditions.checkArgument(
serializableInstance instanceof Serializable,
String.format(
"The UDF %s should be an instance of Serializable.",
serializableInstance.getClass().getName()));
this.udfSerializedString =
SerializationUtilities.serializeObject((Serializable) serializableInstance);
}
|
this.className = className;
|
public HiveFunctionWrapper(String className, UDFType serializableInstance) {
this(className);
Preconditions.checkArgument(
serializableInstance.getClass().getName().equals(className),
String.format(
"Expect the UDF is instance of %s, but is instance of %s.",
className, serializableInstance.getClass().getName()));
Preconditions.checkArgument(
serializableInstance instanceof Serializable,
String.format(
"The UDF %s should be an instance of Serializable.",
serializableInstance.getClass().getName()));
this.udfSerializedString =
SerializationUtilities.serializeObject((Serializable) serializableInstance);
}
|
class HiveFunctionWrapper<UDFType> implements Serializable {
public static final long serialVersionUID = 393313529306818205L;
private final String className;
private String udfSerializedString;
private transient UDFType instance = null;
public HiveFunctionWrapper(String className) {
this.className = className;
}
/**
* Create a HiveFunctionWrapper with a UDF instance. In this constructor, the instance will be
* serialized to a string and held in the HiveFunctionWrapper.
*/
/**
* Instantiate a Hive function instance.
*
* @return a Hive function instance
*/
public UDFType createFunction() {
if (udfSerializedString != null) {
return deserializeUDF();
} else if (instance != null) {
return instance;
} else {
UDFType func;
try {
func = getUDFClass().newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
throw new FlinkHiveUDFException(
String.format("Failed to create function from %s", className), e);
}
if (!(func instanceof UDF)) {
instance = func;
}
return func;
}
}
/**
* Get class name of the Hive function.
*
* @return class name of the Hive function
*/
public String getClassName() {
return className;
}
/**
* Get class of the Hive function.
*
* @return class of the Hive function
* @throws ClassNotFoundException thrown when the class is not found in classpath
*/
public Class<UDFType> getUDFClass() throws ClassNotFoundException {
return (Class<UDFType>) Thread.currentThread().getContextClassLoader().loadClass(className);
}
/**
* Deserialize the UDF using the udfSerializedString held on.
*
* @return the deserialized UDF
*/
private UDFType deserializeUDF() {
try {
return (UDFType)
SerializationUtilities.deserializeObject(
udfSerializedString, (Class<Serializable>) getUDFClass());
} catch (ClassNotFoundException e) {
throw new FlinkHiveUDFException(
String.format("Failed to deserialize function %s", className), e);
}
}
}
|
class HiveFunctionWrapper<UDFType> implements Serializable {
public static final long serialVersionUID = 393313529306818205L;
private final String className;
private String udfSerializedString;
private transient UDFType instance = null;
public HiveFunctionWrapper(String className) {
this.className = className;
}
/**
* Create a HiveFunctionWrapper with a UDF instance. In this constructor, the instance will be
* serialized to a string and held in the HiveFunctionWrapper.
*/
/**
* Instantiate a Hive function instance.
*
* @return a Hive function instance
*/
public UDFType createFunction() {
if (udfSerializedString != null) {
return deserializeUDF();
} else if (instance != null) {
return instance;
} else {
UDFType func;
try {
func = getUDFClass().newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
throw new FlinkHiveUDFException(
String.format("Failed to create function from %s", className), e);
}
if (!(func instanceof UDF)) {
instance = func;
}
return func;
}
}
/**
* Get class name of the Hive function.
*
* @return class name of the Hive function
*/
public String getClassName() {
return className;
}
/**
* Get class of the Hive function.
*
* @return class of the Hive function
* @throws ClassNotFoundException thrown when the class is not found in classpath
*/
public Class<UDFType> getUDFClass() throws ClassNotFoundException {
return (Class<UDFType>) Thread.currentThread().getContextClassLoader().loadClass(className);
}
/**
* Deserialize the UDF using the udfSerializedString held on.
*
* @return the deserialized UDF
*/
private UDFType deserializeUDF() {
try {
return (UDFType)
SerializationUtilities.deserializeObject(
udfSerializedString, (Class<Serializable>) getUDFClass());
} catch (ClassNotFoundException e) {
throw new FlinkHiveUDFException(
String.format("Failed to deserialize function %s.", className), e);
}
}
}
|
I believe the 404 status code is enough, and the core tracer will populate all the info needed. https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core-tracing-opentelemetry/src/main/java/com/azure/core/tracing/opentelemetry/implementation/HttpTraceUtil.java#L179 Can we avoid populating custom error codes?
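A minimal sketch of the suggested simplification, assuming `tracer.end` derives span status from the status code and throwable (as the linked HttpTraceUtil does):

private void end(int statusCode, Throwable throwable, Context context) {
    // no custom error.msg / error.type attributes here; the core tracer
    // is assumed to populate the span status from statusCode and throwable
    tracer.end(statusCode, throwable, context);
}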
|
private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, null, context);
} else {
tracer.setAttribute(TracerProvider.ERROR_MSG, throwable.getMessage(), context);
tracer.setAttribute(TracerProvider.ERROR_TYPE, throwable.getClass().getName(), context);
tracer.end(statusCode, throwable, context);
}
} else {
tracer.end(statusCode, null, context);
}
}
|
tracer.setAttribute(TracerProvider.ERROR_MSG, "Not found exception", context);
|
private void end(int statusCode, Throwable throwable, Context context) {
if (throwable != null) {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && cosmosException.getSubStatusCode() == 0) {
tracer.end(statusCode, null, context);
return;
}
}
}
tracer.end(statusCode, throwable, context);
}
|
class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String ERROR_MSG = "error.msg";
public static final String ERROR_TYPE = "error.type";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span with which the event should be associated.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span, the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
}
|
class TracerProvider {
private Tracer tracer;
private static final Logger LOGGER = LoggerFactory.getLogger(TracerProvider.class);
private static final ObjectMapper mapper = new ObjectMapper();
private final static String JSON_STRING = "JSON";
public final static String DB_TYPE_VALUE = "Cosmos";
public final static String DB_TYPE = "db.type";
public final static String DB_INSTANCE = "db.instance";
public final static String DB_URL = "db.url";
public static final String DB_STATEMENT = "db.statement";
public static final String COSMOS_CALL_DEPTH = "cosmosCallDepth";
public static final String COSMOS_CALL_DEPTH_VAL = "nested";
public static final int ERROR_CODE = 0;
public static final String RESOURCE_PROVIDER_NAME = "Microsoft.DocumentDB";
public final Duration CRUD_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(100);
public final Duration QUERY_THRESHOLD_FOR_DIAGNOSTICS = Duration.ofMillis(500);
public TracerProvider(Tracer tracer) {
this.tracer = tracer;
}
public boolean isEnabled() {
return tracer != null;
}
/**
* For each tracer plugged into the SDK a new tracing span is created.
* <p>
* The {@code context} will be checked for containing information about a parent span. If a parent span is found the
* new span will be added as a child, otherwise the span will be created and added to the context and any downstream
* start calls will use the created span as the parent.
*
* @param context Additional metadata that is passed through the call stack.
* @return An updated context object.
*/
public Context startSpan(String methodName, String databaseId, String endpoint, Context context) {
Context local = Objects.requireNonNull(context, "'context' cannot be null.");
local = local.addData(AZ_TRACING_NAMESPACE_KEY, RESOURCE_PROVIDER_NAME);
local = tracer.start(methodName, local);
if (databaseId != null) {
tracer.setAttribute(TracerProvider.DB_INSTANCE, databaseId, local);
}
tracer.setAttribute(TracerProvider.DB_TYPE, DB_TYPE_VALUE, local);
tracer.setAttribute(TracerProvider.DB_URL, endpoint, local);
tracer.setAttribute(TracerProvider.DB_STATEMENT, methodName, local);
return local;
}
/**
* Adds an event to the current span with the provided {@code timestamp} and {@code attributes}.
* <p>This API does not provide any normalization if provided timestamps are out of range of the current
* span timeline</p>
* <p>Supported attribute values include String, double, boolean, long, String [], double [], long [].
* Any other Object value type and null values will be silently ignored.</p>
*
* @param name the name of the event.
* @param attributes the additional attributes to be set for the event.
* @param timestamp The instant, in UTC, at which the event will be associated to the span.
* @param context the call metadata containing information of the span with which the event should be associated.
* @throws NullPointerException if {@code eventName} is {@code null}.
*/
public void addEvent(String name, Map<String, Object> attributes, OffsetDateTime timestamp, Context context) {
tracer.addEvent(name, attributes, timestamp, context);
}
/**
* Given a context containing the current tracing span, the span is marked completed with status info from
* {@link Signal}. For each tracer plugged into the SDK the current tracing span is marked as completed.
*
* @param context Additional metadata that is passed through the call stack.
* @param signal The signal indicates the status and contains the metadata we need to end the tracing span.
*/
public <T extends CosmosResponse<? extends Resource>> void endSpan(Context context,
Signal<T> signal,
int statusCode) {
Objects.requireNonNull(context, "'context' cannot be null.");
Objects.requireNonNull(signal, "'signal' cannot be null.");
switch (signal.getType()) {
case ON_COMPLETE:
end(statusCode, null, context);
break;
case ON_ERROR:
Throwable throwable = null;
if (signal.hasError()) {
throwable = signal.getThrowable();
if (throwable instanceof CosmosException) {
CosmosException exception = (CosmosException) throwable;
statusCode = exception.getStatusCode();
}
}
end(statusCode, throwable, context);
break;
default:
break;
}
}
public <T extends CosmosResponse<?>> Mono<T> traceEnabledCosmosResponsePublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint) {
return traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint,
(T response) -> response.getStatusCode(), (T response) -> response.getDiagnostics(), null);
}
public Mono<CosmosBatchResponse> traceEnabledBatchResponsePublisher(Mono<CosmosBatchResponse> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosBatchResponse::getStatusCode,
CosmosBatchResponse::getDiagnostics,
null);
}
public <T> Mono<CosmosItemResponse<T>> traceEnabledCosmosItemResponsePublisher(Mono<CosmosItemResponse<T>> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Duration thresholdForDiagnosticsOnTracer) {
return publisherWithClientTelemetry(resultPublisher, context, spanName, containerId, databaseId,
BridgeInternal.getServiceEndpoint(client),
client,
consistencyLevel,
operationType,
resourceType,
CosmosItemResponse::getStatusCode,
CosmosItemResponse::getDiagnostics,
thresholdForDiagnosticsOnTracer);
}
private <T> Mono<T> traceEnabledPublisher(Mono<T> resultPublisher,
Context context,
String spanName,
String databaseId,
String endpoint,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
Optional<Object> callDepth = context.getData(COSMOS_CALL_DEPTH);
final boolean isNestedCall = callDepth.isPresent();
return resultPublisher
.doOnSubscribe(ignoredValue -> {
if (isEnabled() && !isNestedCall) {
parentContext.set(this.startSpan(spanName, databaseId, endpoint,
context));
}
}).doOnSuccess(response -> {
if (isEnabled() && !isNestedCall) {
CosmosDiagnostics cosmosDiagnostics = diagnosticFunc.apply(response);
try {
Duration threshold = thresholdForDiagnosticsOnTracer;
if(threshold == null) {
threshold = CRUD_THRESHOLD_FOR_DIAGNOSTICS;
}
if (cosmosDiagnostics != null
&& cosmosDiagnostics.getDuration() != null
&& cosmosDiagnostics.getDuration().compareTo(threshold) > 0) {
addDiagnosticsOnTracerEvent(cosmosDiagnostics, parentContext.get());
}
} catch (JsonProcessingException ex) {
LOGGER.warn("Error while serializing diagnostics for tracer: {}", ex.getMessage());
}
this.endSpan(parentContext.get(), Signal.complete(), statusCodeFunc.apply(response));
}
}).doOnError(throwable -> {
if (isEnabled() && !isNestedCall) {
this.endSpan(parentContext.get(), Signal.error(throwable), ERROR_CODE);
}
});
}
private <T> Mono<T> publisherWithClientTelemetry(Mono<T> resultPublisher,
Context context,
String spanName,
String containerId,
String databaseId,
String endpoint,
CosmosAsyncClient client,
ConsistencyLevel consistencyLevel,
OperationType operationType,
ResourceType resourceType,
Function<T, Integer> statusCodeFunc,
Function<T, CosmosDiagnostics> diagnosticFunc,
Duration thresholdForDiagnosticsOnTracer) {
Mono<T> tracerMono = traceEnabledPublisher(resultPublisher, context, spanName, databaseId, endpoint, statusCodeFunc, diagnosticFunc, thresholdForDiagnosticsOnTracer);
return tracerMono
.doOnSuccess(response -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosItemResponse) {
@SuppressWarnings("unchecked")
CosmosItemResponse<T> itemResponse = (CosmosItemResponse<T>) response;
fillClientTelemetry(client, itemResponse.getDiagnostics(), itemResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(itemResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) itemResponse.getRequestCharge());
} else if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && response instanceof CosmosBatchResponse) {
@SuppressWarnings("unchecked")
CosmosBatchResponse cosmosBatchResponse = (CosmosBatchResponse) response;
fillClientTelemetry(client, cosmosBatchResponse.getDiagnostics(), cosmosBatchResponse.getStatusCode(),
ModelBridgeInternal.getPayloadLength(cosmosBatchResponse), containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosBatchResponse.getRequestCharge());
}
}).doOnError(throwable -> {
if (Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(client)) && throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) throwable;
fillClientTelemetry(client, cosmosException.getDiagnostics(), cosmosException.getStatusCode(),
null, containerId,
databaseId, operationType, resourceType, consistencyLevel,
(float) cosmosException.getRequestCharge());
}
});
}
private void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
float requestCharge) {
ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
ClientTelemetry.REQUEST_LATENCY_UNIT);
ConcurrentDoubleHistogram latencyHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
if (latencyHistogram != null) {
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
} else {
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_SUCCESS_STATUSCODE && statusCode <= HttpConstants.StatusCodes.MAXIMUM_SUCCESS_STATUSCODE) {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
} else {
latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC, ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
}
latencyHistogram.setAutoResize(true);
ClientTelemetry.recordValue(latencyHistogram, cosmosDiagnostics.getDuration().toNanos()/1000);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
}
ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, cosmosDiagnostics,
statusCode, objectSize, containerId, databaseId
, operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
ConcurrentDoubleHistogram requestChargeHistogram = telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
if (requestChargeHistogram != null) {
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
} else {
requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX, ClientTelemetry.REQUEST_CHARGE_PRECISION);
requestChargeHistogram.setAutoResize(true);
ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
requestChargeHistogram);
}
}
private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
CosmosDiagnostics cosmosDiagnostics,
int statusCode,
Integer objectSize,
String containerId,
String databaseId,
OperationType operationType,
ResourceType resourceType,
ConsistencyLevel consistencyLevel,
String metricsName,
String unitName) {
ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
reportPayload.setRegionsContacted(BridgeInternal.getRegionsContacted(cosmosDiagnostics).toString());
reportPayload.setConsistency(consistencyLevel == null ?
BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
consistencyLevel);
if (objectSize != null) {
reportPayload.setGreaterThan1Kb(objectSize > ClientTelemetry.ONE_KB_TO_BYTES);
}
reportPayload.setDatabaseName(databaseId);
reportPayload.setContainerName(containerId);
reportPayload.setOperation(operationType);
reportPayload.setResource(resourceType);
reportPayload.setStatusCode(statusCode);
return reportPayload;
}
private void addDiagnosticsOnTracerEvent(CosmosDiagnostics cosmosDiagnostics, Context context) throws JsonProcessingException {
if (cosmosDiagnostics == null) {
return;
}
ClientSideRequestStatistics clientSideRequestStatistics =
BridgeInternal.getClientSideRequestStatics(cosmosDiagnostics);
Map<String, Object> attributes = null;
int diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics storeResponseStatistics :
clientSideRequestStatistics.getResponseStatisticsList()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(storeResponseStatistics));
Iterator<RequestTimeline.Event> eventIterator = null;
try {
if (storeResponseStatistics.getStoreResult() != null) {
eventIterator =
DirectBridgeInternal.getRequestTimeline(storeResponseStatistics.getStoreResult().toResponse()).iterator();
}
} catch (CosmosException ex) {
eventIterator = BridgeInternal.getRequestTimeline(ex).iterator();
}
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(storeResponseStatistics.getRequestResponseTimeUTC()
, ZoneOffset.UTC);
if (eventIterator != null) {
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.StoreResponseStatistics statistics :
ClientSideRequestStatistics.getCappedSupplementalResponseStatisticsList(clientSideRequestStatistics.getSupplementalResponseStatisticsList())) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(statistics));
OffsetDateTime requestStartTime = OffsetDateTime.ofInstant(statistics.getRequestResponseTimeUTC(),
ZoneOffset.UTC);
if (statistics.getStoreResult() != null) {
Iterator<RequestTimeline.Event> eventIterator =
DirectBridgeInternal.getRequestTimeline(statistics.getStoreResult().toResponse()).iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("Supplemental StoreResponse" + diagnosticsCounter++, attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getGatewayStatistics() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getGatewayStatistics()));
OffsetDateTime requestStartTime =
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC);
if (clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline() != null) {
Iterator<RequestTimeline.Event> eventIterator =
clientSideRequestStatistics.getGatewayStatistics().getRequestTimeline().iterator();
while (eventIterator.hasNext()) {
RequestTimeline.Event event = eventIterator.next();
if (event.getName().equals("created")) {
requestStartTime = OffsetDateTime.ofInstant(event.getStartTime(), ZoneOffset.UTC);
break;
}
}
}
this.addEvent("GatewayStatistics", attributes, requestStartTime, context);
}
if (clientSideRequestStatistics.getRetryContext().getRetryStartTime() != null) {
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRetryContext()));
this.addEvent("Retry Context", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRetryContext().getRetryStartTime(),
ZoneOffset.UTC), context);
}
diagnosticsCounter = 1;
for (ClientSideRequestStatistics.AddressResolutionStatistics addressResolutionStatistics :
clientSideRequestStatistics.getAddressResolutionStatistics().values()) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(addressResolutionStatistics));
this.addEvent("AddressResolutionStatistics" + diagnosticsCounter++, attributes,
OffsetDateTime.ofInstant(addressResolutionStatistics.getStartTimeUTC(), ZoneOffset.UTC), context);
}
if (clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList != null) {
for (SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics :
clientSideRequestStatistics.getSerializationDiagnosticsContext().serializationDiagnosticsList) {
attributes = new HashMap<>();
attributes.put(JSON_STRING, mapper.writeValueAsString(serializationDiagnostics));
this.addEvent("SerializationDiagnostics " + serializationDiagnostics.serializationType, attributes,
OffsetDateTime.ofInstant(serializationDiagnostics.startTimeUTC, ZoneOffset.UTC), context);
}
}
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getRegionsContacted()));
this.addEvent("RegionContacted", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(ClientSideRequestStatistics.fetchSystemInformation()));
this.addEvent("SystemInformation", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
attributes = new HashMap<>();
attributes.put(JSON_STRING,
mapper.writeValueAsString(clientSideRequestStatistics.getDiagnosticsClientContext()));
this.addEvent("ClientCfgs", attributes,
OffsetDateTime.ofInstant(clientSideRequestStatistics.getRequestStartTimeUTC(), ZoneOffset.UTC), context);
}
}
|
Yeah, I fully agree the cost is very small, but small costs pile up, and when there is absolutely no reason to pay a cost, I think we should avoid it. It's certainly not worth blocking the PR, which is why I approved anyway and left it to you to decide what to do :). (I wouldn't have asked if it made the code worse or more complex.)
|
String replacePathParameters(String path) {
if (path.isEmpty()) {
return "";
}
StringBuilder sb = new StringBuilder();
Matcher m = PATH_PARAM_PATTERN.matcher(path);
while (m.find()) {
String match = m.group();
String paramName = match.substring(1, match.length() - 1);
String val = pathParams.get(paramName);
if (val == null) {
throw new WebSocketClientException("Unable to obtain the path param for: " + paramName);
}
m.appendReplacement(sb, URLEncoder.encode(val, StandardCharsets.UTF_8));
}
m.appendTail(sb);
return path.startsWith("/") ? sb.toString() : "/" + sb.toString();
}
|
return "";
|
String replacePathParameters(String path) {
if (path.isEmpty()) {
return path;
}
StringBuilder sb = new StringBuilder();
Matcher m = PATH_PARAM_PATTERN.matcher(path);
while (m.find()) {
String match = m.group();
String paramName = match.substring(1, match.length() - 1);
String val = pathParams.get(paramName);
if (val == null) {
throw new WebSocketClientException("Unable to obtain the path param for: " + paramName);
}
m.appendReplacement(sb, URLEncoder.encode(val, StandardCharsets.UTF_8));
}
m.appendTail(sb);
return path.startsWith("/") ? sb.toString() : "/" + sb.toString();
}
|
class WebSocketConnectorBase<THIS extends WebSocketConnectorBase<THIS>> {
protected static final Pattern PATH_PARAM_PATTERN = Pattern.compile("\\{[a-zA-Z0-9_]+\\}");
protected URI baseUri;
protected final Map<String, String> pathParams;
protected final Map<String, List<String>> headers;
protected final Set<String> subprotocols;
protected String path;
protected Set<String> pathParamNames;
protected final Vertx vertx;
protected final Codecs codecs;
protected final ClientConnectionManager connectionManager;
protected final WebSocketsClientRuntimeConfig config;
protected final TlsConfigurationRegistry tlsConfigurationRegistry;
WebSocketConnectorBase(Vertx vertx, Codecs codecs,
ClientConnectionManager connectionManager, WebSocketsClientRuntimeConfig config,
TlsConfigurationRegistry tlsConfigurationRegistry) {
this.headers = new HashMap<>();
this.subprotocols = new HashSet<>();
this.pathParams = new HashMap<>();
this.vertx = vertx;
this.codecs = codecs;
this.connectionManager = connectionManager;
this.config = config;
this.tlsConfigurationRegistry = tlsConfigurationRegistry;
this.path = "";
this.pathParamNames = Set.of();
}
public THIS baseUri(URI baseUri) {
this.baseUri = Objects.requireNonNull(baseUri);
return self();
}
public THIS addHeader(String name, String value) {
Objects.requireNonNull(name);
Objects.requireNonNull(value);
List<String> values = headers.get(name);
if (values == null) {
values = new ArrayList<>();
headers.put(name, values);
}
values.add(value);
return self();
}
public THIS pathParam(String name, String value) {
Objects.requireNonNull(name);
Objects.requireNonNull(value);
if (!pathParamNames.contains(name)) {
throw new IllegalArgumentException(
String.format("[%s] is not a valid path parameter in the path %s", name, path));
}
pathParams.put(name, value);
return self();
}
public THIS addSubprotocol(String value) {
subprotocols.add(Objects.requireNonNull(value));
return self();
}
void setPath(String path) {
this.path = Objects.requireNonNull(path);
this.pathParamNames = getPathParamNames(path);
}
@SuppressWarnings("unchecked")
protected THIS self() {
return (THIS) this;
}
Set<String> getPathParamNames(String path) {
Set<String> names = new HashSet<>();
Matcher m = PATH_PARAM_PATTERN.matcher(path);
while (m.find()) {
String match = m.group();
String paramName = match.substring(1, match.length() - 1);
names.add(paramName);
}
return names;
}
protected WebSocketClientOptions populateClientOptions() {
WebSocketClientOptions clientOptions = new WebSocketClientOptions();
if (config.offerPerMessageCompression()) {
clientOptions.setTryUsePerMessageCompression(true);
if (config.compressionLevel().isPresent()) {
clientOptions.setCompressionLevel(config.compressionLevel().getAsInt());
}
}
if (config.maxMessageSize().isPresent()) {
clientOptions.setMaxMessageSize(config.maxMessageSize().getAsInt());
}
Optional<TlsConfiguration> maybeTlsConfiguration = TlsConfiguration.from(tlsConfigurationRegistry,
config.tlsConfigurationName());
if (maybeTlsConfiguration.isPresent()) {
TlsConfigUtils.configure(clientOptions, maybeTlsConfiguration.get());
}
return clientOptions;
}
protected WebSocketConnectOptions newConnectOptions(URI serverEndpointUri) {
WebSocketConnectOptions connectOptions = new WebSocketConnectOptions()
.setSsl(isHttps(serverEndpointUri))
.setHost(serverEndpointUri.getHost());
if (serverEndpointUri.getPort() != -1) {
connectOptions.setPort(serverEndpointUri.getPort());
} else if (isHttps(serverEndpointUri)) {
connectOptions.setPort(443);
}
return connectOptions;
}
protected boolean isHttps(URI uri) {
return "https".equals(uri.getScheme());
}
}
|
class WebSocketConnectorBase<THIS extends WebSocketConnectorBase<THIS>> {
protected static final Pattern PATH_PARAM_PATTERN = Pattern.compile("\\{[a-zA-Z0-9_]+\\}");
protected URI baseUri;
protected final Map<String, String> pathParams;
protected final Map<String, List<String>> headers;
protected final Set<String> subprotocols;
protected String path;
protected Set<String> pathParamNames;
protected final Vertx vertx;
protected final Codecs codecs;
protected final ClientConnectionManager connectionManager;
protected final WebSocketsClientRuntimeConfig config;
protected final TlsConfigurationRegistry tlsConfigurationRegistry;
WebSocketConnectorBase(Vertx vertx, Codecs codecs,
ClientConnectionManager connectionManager, WebSocketsClientRuntimeConfig config,
TlsConfigurationRegistry tlsConfigurationRegistry) {
this.headers = new HashMap<>();
this.subprotocols = new HashSet<>();
this.pathParams = new HashMap<>();
this.vertx = vertx;
this.codecs = codecs;
this.connectionManager = connectionManager;
this.config = config;
this.tlsConfigurationRegistry = tlsConfigurationRegistry;
this.path = "";
this.pathParamNames = Set.of();
}
public THIS baseUri(URI baseUri) {
this.baseUri = Objects.requireNonNull(baseUri);
return self();
}
public THIS addHeader(String name, String value) {
Objects.requireNonNull(name);
Objects.requireNonNull(value);
List<String> values = headers.get(name);
if (values == null) {
values = new ArrayList<>();
headers.put(name, values);
}
values.add(value);
return self();
}
public THIS pathParam(String name, String value) {
Objects.requireNonNull(name);
Objects.requireNonNull(value);
if (!pathParamNames.contains(name)) {
throw new IllegalArgumentException(
String.format("[%s] is not a valid path parameter in the path %s", name, path));
}
pathParams.put(name, value);
return self();
}
public THIS addSubprotocol(String value) {
subprotocols.add(Objects.requireNonNull(value));
return self();
}
void setPath(String path) {
this.path = Objects.requireNonNull(path);
this.pathParamNames = getPathParamNames(path);
}
@SuppressWarnings("unchecked")
protected THIS self() {
return (THIS) this;
}
Set<String> getPathParamNames(String path) {
Set<String> names = new HashSet<>();
Matcher m = PATH_PARAM_PATTERN.matcher(path);
while (m.find()) {
String match = m.group();
String paramName = match.substring(1, match.length() - 1);
names.add(paramName);
}
return names;
}
protected WebSocketClientOptions populateClientOptions() {
WebSocketClientOptions clientOptions = new WebSocketClientOptions();
if (config.offerPerMessageCompression()) {
clientOptions.setTryUsePerMessageCompression(true);
if (config.compressionLevel().isPresent()) {
clientOptions.setCompressionLevel(config.compressionLevel().getAsInt());
}
}
if (config.maxMessageSize().isPresent()) {
clientOptions.setMaxMessageSize(config.maxMessageSize().getAsInt());
}
Optional<TlsConfiguration> maybeTlsConfiguration = TlsConfiguration.from(tlsConfigurationRegistry,
config.tlsConfigurationName());
if (maybeTlsConfiguration.isPresent()) {
TlsConfigUtils.configure(clientOptions, maybeTlsConfiguration.get());
}
return clientOptions;
}
protected WebSocketConnectOptions newConnectOptions(URI serverEndpointUri) {
WebSocketConnectOptions connectOptions = new WebSocketConnectOptions()
.setSsl(isHttps(serverEndpointUri))
.setHost(serverEndpointUri.getHost());
if (serverEndpointUri.getPort() != -1) {
connectOptions.setPort(serverEndpointUri.getPort());
} else if (isHttps(serverEndpointUri)) {
connectOptions.setPort(443);
}
return connectOptions;
}
protected boolean isHttps(URI uri) {
return "https".equals(uri.getScheme());
}
}
|
But I can't get a `SqlTypeName` from a `SqlBasicTypeNameSpec`.
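For reference, `SqlTypeNameSpec#getTypeName()` returns a `SqlIdentifier`, not a `SqlTypeName`, so the comparison has to go through the simple name string; a sketch based on the method below (`basicNameSpec` is the cast `SqlBasicTypeNameSpec`):

// compare the identifier's simple name against the enum constant's name
boolean isTimestamp = basicNameSpec.getTypeName().getSimple()
        .equalsIgnoreCase(SqlTypeName.TIMESTAMP.name());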
|
private static SqlTypeNameSpec convertDataTypes(SqlTypeNameSpec nameSpec) {
if (nameSpec instanceof SqlBasicTypeNameSpec) {
SqlBasicTypeNameSpec basicNameSpec = (SqlBasicTypeNameSpec) nameSpec;
if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.TIMESTAMP.name())) {
if (basicNameSpec.getPrecision() < 0) {
nameSpec = new SqlBasicTypeNameSpec(SqlTypeName.TIMESTAMP, 9, basicNameSpec.getScale(),
basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
}
} else if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.BINARY.name())) {
if (basicNameSpec.getPrecision() < 0) {
nameSpec = new SqlBasicTypeNameSpec(SqlTypeName.VARBINARY, Integer.MAX_VALUE, basicNameSpec.getScale(),
basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
}
}
} else if (nameSpec instanceof ExtendedSqlCollectionTypeNameSpec) {
ExtendedSqlCollectionTypeNameSpec collectionNameSpec = (ExtendedSqlCollectionTypeNameSpec) nameSpec;
SqlTypeNameSpec elementNameSpec = collectionNameSpec.getElementTypeName();
SqlTypeNameSpec convertedElementNameSpec = convertDataTypes(elementNameSpec);
if (convertedElementNameSpec != elementNameSpec) {
nameSpec = new ExtendedSqlCollectionTypeNameSpec(convertedElementNameSpec,
collectionNameSpec.elementNullable(), collectionNameSpec.getCollectionTypeName(),
collectionNameSpec.unparseAsStandard(), collectionNameSpec.getParserPos());
}
} else if (nameSpec instanceof SqlMapTypeNameSpec) {
SqlMapTypeNameSpec mapNameSpec = (SqlMapTypeNameSpec) nameSpec;
SqlDataTypeSpec keyTypeSpec = mapNameSpec.getKeyType();
SqlDataTypeSpec valTypeSpec = mapNameSpec.getValType();
SqlDataTypeSpec convertedKeyTypeSpec = convertDataTypes(keyTypeSpec);
SqlDataTypeSpec convertedValTypeSpec = convertDataTypes(valTypeSpec);
if (keyTypeSpec != convertedKeyTypeSpec || valTypeSpec != convertedValTypeSpec) {
nameSpec = new SqlMapTypeNameSpec(convertedKeyTypeSpec, convertedValTypeSpec, nameSpec.getParserPos());
}
} else if (nameSpec instanceof ExtendedSqlRowTypeNameSpec) {
ExtendedSqlRowTypeNameSpec rowNameSpec = (ExtendedSqlRowTypeNameSpec) nameSpec;
List<SqlDataTypeSpec> fieldTypeSpecs = rowNameSpec.getFieldTypes();
List<SqlDataTypeSpec> convertedFieldTypeSpecs = new ArrayList<>(fieldTypeSpecs.size());
boolean updated = false;
for (SqlDataTypeSpec fieldTypeSpec : fieldTypeSpecs) {
SqlDataTypeSpec convertedFieldTypeSpec = convertDataTypes(fieldTypeSpec);
if (fieldTypeSpec != convertedFieldTypeSpec) {
updated = true;
}
convertedFieldTypeSpecs.add(convertedFieldTypeSpec);
}
if (updated) {
nameSpec = new ExtendedSqlRowTypeNameSpec(nameSpec.getParserPos(), rowNameSpec.getFieldNames(),
convertedFieldTypeSpecs, rowNameSpec.getComments(), rowNameSpec.unparseAsStandard());
}
}
return nameSpec;
}
|
if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.TIMESTAMP.name())) {
|
private static SqlTypeNameSpec convertDataTypes(SqlTypeNameSpec nameSpec) throws ParseException {
if (nameSpec instanceof SqlBasicTypeNameSpec) {
SqlBasicTypeNameSpec basicNameSpec = (SqlBasicTypeNameSpec) nameSpec;
if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.TIMESTAMP.name())) {
if (basicNameSpec.getPrecision() < 0) {
nameSpec = new SqlBasicTypeNameSpec(SqlTypeName.TIMESTAMP, 9, basicNameSpec.getScale(),
basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
}
} else if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.BINARY.name())) {
if (basicNameSpec.getPrecision() < 0) {
nameSpec = new SqlBasicTypeNameSpec(SqlTypeName.VARBINARY, Integer.MAX_VALUE, basicNameSpec.getScale(),
basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
}
} else if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.VARCHAR.name())) {
if (basicNameSpec.getPrecision() < 0) {
throw new ParseException("VARCHAR precision is mandatory");
}
}
} else if (nameSpec instanceof ExtendedSqlCollectionTypeNameSpec) {
ExtendedSqlCollectionTypeNameSpec collectionNameSpec = (ExtendedSqlCollectionTypeNameSpec) nameSpec;
SqlTypeNameSpec elementNameSpec = collectionNameSpec.getElementTypeName();
SqlTypeNameSpec convertedElementNameSpec = convertDataTypes(elementNameSpec);
if (convertedElementNameSpec != elementNameSpec) {
nameSpec = new ExtendedSqlCollectionTypeNameSpec(convertedElementNameSpec,
collectionNameSpec.elementNullable(), collectionNameSpec.getCollectionTypeName(),
collectionNameSpec.unparseAsStandard(), collectionNameSpec.getParserPos());
}
} else if (nameSpec instanceof SqlMapTypeNameSpec) {
SqlMapTypeNameSpec mapNameSpec = (SqlMapTypeNameSpec) nameSpec;
SqlDataTypeSpec keyTypeSpec = mapNameSpec.getKeyType();
SqlDataTypeSpec valTypeSpec = mapNameSpec.getValType();
SqlDataTypeSpec convertedKeyTypeSpec = convertDataTypes(keyTypeSpec);
SqlDataTypeSpec convertedValTypeSpec = convertDataTypes(valTypeSpec);
if (keyTypeSpec != convertedKeyTypeSpec || valTypeSpec != convertedValTypeSpec) {
nameSpec = new SqlMapTypeNameSpec(convertedKeyTypeSpec, convertedValTypeSpec, nameSpec.getParserPos());
}
} else if (nameSpec instanceof ExtendedSqlRowTypeNameSpec) {
ExtendedSqlRowTypeNameSpec rowNameSpec = (ExtendedSqlRowTypeNameSpec) nameSpec;
List<SqlDataTypeSpec> fieldTypeSpecs = rowNameSpec.getFieldTypes();
List<SqlDataTypeSpec> convertedFieldTypeSpecs = new ArrayList<>(fieldTypeSpecs.size());
boolean updated = false;
for (SqlDataTypeSpec fieldTypeSpec : fieldTypeSpecs) {
SqlDataTypeSpec convertedFieldTypeSpec = convertDataTypes(fieldTypeSpec);
if (fieldTypeSpec != convertedFieldTypeSpec) {
updated = true;
}
convertedFieldTypeSpecs.add(convertedFieldTypeSpec);
}
if (updated) {
nameSpec = new ExtendedSqlRowTypeNameSpec(nameSpec.getParserPos(), rowNameSpec.getFieldNames(),
convertedFieldTypeSpecs, rowNameSpec.getComments(), rowNameSpec.unparseAsStandard());
}
}
return nameSpec;
}
|
class HiveDDLUtils {
public static final String COL_DELIMITER = ";";
private static final byte HIVE_CONSTRAINT_ENABLE = 1 << 2;
private static final byte HIVE_CONSTRAINT_VALIDATE = 1 << 1;
private static final byte HIVE_CONSTRAINT_RELY = 1;
private static final Set<String> RESERVED_DB_PROPERTIES = new HashSet<>();
private static final Set<String> RESERVED_TABLE_PROPERTIES = new HashSet<>();
private static final List<String> RESERVED_TABLE_PROP_PREFIX = new ArrayList<>();
static {
RESERVED_DB_PROPERTIES.addAll(Arrays.asList(ALTER_DATABASE_OP, DATABASE_LOCATION_URI));
RESERVED_TABLE_PROPERTIES.addAll(Arrays.asList(TABLE_LOCATION_URI,
TABLE_IS_EXTERNAL, PK_CONSTRAINT_TRAIT, NOT_NULL_CONSTRAINT_TRAITS,
STORED_AS_FILE_FORMAT, STORED_AS_INPUT_FORMAT, STORED_AS_OUTPUT_FORMAT, SERDE_LIB_CLASS_NAME));
RESERVED_TABLE_PROP_PREFIX.add(SERDE_INFO_PROP_PREFIX);
}
private HiveDDLUtils() {
}
public static SqlNodeList checkReservedDBProperties(SqlNodeList props) throws ParseException {
return checkReservedProperties(RESERVED_DB_PROPERTIES, props, "Databases");
}
public static SqlNodeList checkReservedTableProperties(SqlNodeList props) throws ParseException {
props = checkReservedProperties(RESERVED_TABLE_PROPERTIES, props, "Tables");
props = checkReservedPrefix(RESERVED_TABLE_PROP_PREFIX, props, "Tables");
return props;
}
public static SqlNodeList ensureNonGeneric(SqlNodeList props) throws ParseException {
for (SqlNode node : props) {
if (node instanceof SqlTableOption && ((SqlTableOption) node).getKeyString().equalsIgnoreCase(CatalogConfig.IS_GENERIC)) {
if (!((SqlTableOption) node).getValueString().equalsIgnoreCase("false")) {
throw new ParseException("Creating generic object with Hive dialect is not allowed");
}
}
}
return props;
}
private static SqlNodeList checkReservedPrefix(List<String> reserved, SqlNodeList properties, String metaType) throws ParseException {
if (properties == null) {
return null;
}
Set<String> match = new HashSet<>();
for (SqlNode node : properties) {
if (node instanceof SqlTableOption) {
String key = ((SqlTableOption) node).getKeyString();
for (String prefix : reserved) {
if (key.startsWith(prefix)) {
match.add(key);
}
}
}
}
if (!match.isEmpty()) {
throw new ParseException(String.format(
"Properties %s have reserved prefix and shouldn't be used for Hive %s", match, metaType));
}
return properties;
}
private static SqlNodeList checkReservedProperties(Set<String> reservedProperties, SqlNodeList properties,
String metaType) throws ParseException {
if (properties == null) {
return null;
}
Set<String> match = new HashSet<>();
for (SqlNode node : properties) {
if (node instanceof SqlTableOption) {
String key = ((SqlTableOption) node).getKeyString();
if (reservedProperties.contains(key)) {
match.add(key);
}
}
}
if (!match.isEmpty()) {
throw new ParseException(String.format(
"Properties %s are reserved and shouldn't be used for Hive %s", match, metaType));
}
return properties;
}
public static SqlTableOption toTableOption(String key, SqlNode value, SqlParserPos pos) {
return new SqlTableOption(SqlLiteral.createCharString(key, pos), value, pos);
}
public static SqlTableOption toTableOption(String key, String value, SqlParserPos pos) {
return new SqlTableOption(SqlLiteral.createCharString(key, pos), SqlLiteral.createCharString(value, pos), pos);
}
public static void convertDataTypes(SqlNodeList columns) {
if (columns != null) {
for (SqlNode node : columns) {
convertDataTypes((SqlTableColumn) node);
}
}
}
public static void convertDataTypes(SqlTableColumn column) {
column.setType(convertDataTypes(column.getType()));
}
private static SqlDataTypeSpec convertDataTypes(SqlDataTypeSpec typeSpec) {
SqlTypeNameSpec nameSpec = typeSpec.getTypeNameSpec();
SqlTypeNameSpec convertedNameSpec = convertDataTypes(nameSpec);
if (nameSpec != convertedNameSpec) {
typeSpec = new SqlDataTypeSpec(convertedNameSpec, typeSpec.getTimeZone(), typeSpec.getNullable(),
typeSpec.getParserPosition());
}
return typeSpec;
}
public static byte defaultTrait() {
byte res = enableConstraint((byte) 0);
res = relyConstraint(res);
return res;
}
public static byte enableConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_ENABLE);
}
public static byte disableConstraint(byte trait) {
return (byte) (trait & (~HIVE_CONSTRAINT_ENABLE));
}
public static byte validateConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_VALIDATE);
}
public static byte noValidateConstraint(byte trait) {
return (byte) (trait & (~HIVE_CONSTRAINT_VALIDATE));
}
public static byte relyConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_RELY);
}
public static byte noRelyConstraint(byte trait) {
return (byte) (trait & (~HIVE_CONSTRAINT_RELY));
}
public static boolean requireEnableConstraint(byte trait) {
return (trait & HIVE_CONSTRAINT_ENABLE) != 0;
}
public static boolean requireValidateConstraint(byte trait) {
return (trait & HIVE_CONSTRAINT_VALIDATE) != 0;
}
public static boolean requireRelyConstraint(byte trait) {
return (trait & HIVE_CONSTRAINT_RELY) != 0;
}
public static void unparseConstraintTrait(byte trait, SqlWriter writer) {
if (requireEnableConstraint(trait)) {
writer.keyword("ENABLE");
} else {
writer.keyword("DISABLE");
}
if (requireValidateConstraint(trait)) {
writer.keyword("VALIDATE");
} else {
writer.keyword("NOVALIDATE");
}
if (requireRelyConstraint(trait)) {
writer.keyword("RELY");
} else {
writer.keyword("NORELY");
}
}
}
|
class HiveDDLUtils {
public static final String COL_DELIMITER = ";";
private static final byte HIVE_CONSTRAINT_ENABLE = 1 << 2;
private static final byte HIVE_CONSTRAINT_VALIDATE = 1 << 1;
private static final byte HIVE_CONSTRAINT_RELY = 1;
private static final Set<String> RESERVED_DB_PROPERTIES = new HashSet<>();
private static final Set<String> RESERVED_TABLE_PROPERTIES = new HashSet<>();
private static final List<String> RESERVED_TABLE_PROP_PREFIX = new ArrayList<>();
static {
RESERVED_DB_PROPERTIES.addAll(Arrays.asList(ALTER_DATABASE_OP, DATABASE_LOCATION_URI));
RESERVED_TABLE_PROPERTIES.addAll(Arrays.asList(TABLE_LOCATION_URI,
TABLE_IS_EXTERNAL, PK_CONSTRAINT_TRAIT, NOT_NULL_CONSTRAINT_TRAITS,
STORED_AS_FILE_FORMAT, STORED_AS_INPUT_FORMAT, STORED_AS_OUTPUT_FORMAT, SERDE_LIB_CLASS_NAME));
RESERVED_TABLE_PROP_PREFIX.add(SERDE_INFO_PROP_PREFIX);
}
private HiveDDLUtils() {
}
public static SqlNodeList checkReservedDBProperties(SqlNodeList props) throws ParseException {
return checkReservedProperties(RESERVED_DB_PROPERTIES, props, "Databases");
}
public static SqlNodeList checkReservedTableProperties(SqlNodeList props) throws ParseException {
props = checkReservedProperties(RESERVED_TABLE_PROPERTIES, props, "Tables");
props = checkReservedPrefix(RESERVED_TABLE_PROP_PREFIX, props, "Tables");
return props;
}
public static SqlNodeList ensureNonGeneric(SqlNodeList props) throws ParseException {
for (SqlNode node : props) {
if (node instanceof SqlTableOption && ((SqlTableOption) node).getKeyString().equalsIgnoreCase(CatalogConfig.IS_GENERIC)) {
if (!((SqlTableOption) node).getValueString().equalsIgnoreCase("false")) {
throw new ParseException("Creating generic object with Hive dialect is not allowed");
}
}
}
return props;
}
private static SqlNodeList checkReservedPrefix(List<String> reserved, SqlNodeList properties, String metaType) throws ParseException {
if (properties == null) {
return null;
}
Set<String> match = new HashSet<>();
for (SqlNode node : properties) {
if (node instanceof SqlTableOption) {
String key = ((SqlTableOption) node).getKeyString();
for (String prefix : reserved) {
if (key.startsWith(prefix)) {
match.add(key);
}
}
}
}
if (!match.isEmpty()) {
throw new ParseException(String.format(
"Properties %s have reserved prefix and shouldn't be used for Hive %s", match, metaType));
}
return properties;
}
private static SqlNodeList checkReservedProperties(Set<String> reservedProperties, SqlNodeList properties,
String metaType) throws ParseException {
if (properties == null) {
return null;
}
Set<String> match = new HashSet<>();
for (SqlNode node : properties) {
if (node instanceof SqlTableOption) {
String key = ((SqlTableOption) node).getKeyString();
if (reservedProperties.contains(key)) {
match.add(key);
}
}
}
if (!match.isEmpty()) {
throw new ParseException(String.format(
"Properties %s are reserved and shouldn't be used for Hive %s", match, metaType));
}
return properties;
}
public static SqlTableOption toTableOption(String key, SqlNode value, SqlParserPos pos) {
return new SqlTableOption(SqlLiteral.createCharString(key, pos), value, pos);
}
public static SqlTableOption toTableOption(String key, String value, SqlParserPos pos) {
return new SqlTableOption(SqlLiteral.createCharString(key, pos), SqlLiteral.createCharString(value, pos), pos);
}
public static void convertDataTypes(SqlNodeList columns) throws ParseException {
if (columns != null) {
for (SqlNode node : columns) {
convertDataTypes((SqlTableColumn) node);
}
}
}
public static void convertDataTypes(SqlTableColumn column) throws ParseException {
column.setType(convertDataTypes(column.getType()));
}
private static SqlDataTypeSpec convertDataTypes(SqlDataTypeSpec typeSpec) throws ParseException {
SqlTypeNameSpec nameSpec = typeSpec.getTypeNameSpec();
SqlTypeNameSpec convertedNameSpec = convertDataTypes(nameSpec);
if (nameSpec != convertedNameSpec) {
typeSpec = new SqlDataTypeSpec(convertedNameSpec, typeSpec.getTimeZone(), typeSpec.getNullable(),
typeSpec.getParserPosition());
}
return typeSpec;
}
public static byte defaultTrait() {
byte res = enableConstraint((byte) 0);
res = relyConstraint(res);
return res;
}
public static byte enableConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_ENABLE);
}
public static byte disableConstraint(byte trait) {
return (byte) (trait & (~HIVE_CONSTRAINT_ENABLE));
}
public static byte validateConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_VALIDATE);
}
public static byte noValidateConstraint(byte trait) {
return (byte) (trait & (~HIVE_CONSTRAINT_VALIDATE));
}
public static byte relyConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_RELY);
}
public static byte noRelyConstraint(byte trait) {
return (byte) (trait & (~HIVE_CONSTRAINT_RELY));
}
public static boolean requireEnableConstraint(byte trait) {
return (trait & HIVE_CONSTRAINT_ENABLE) != 0;
}
public static boolean requireValidateConstraint(byte trait) {
return (trait & HIVE_CONSTRAINT_VALIDATE) != 0;
}
public static boolean requireRelyConstraint(byte trait) {
return (trait & HIVE_CONSTRAINT_RELY) != 0;
}
public static byte encodeConstraintTrait(SqlHiveConstraintTrait trait) {
byte res = 0;
if (trait.isEnable()) {
res = enableConstraint(res);
}
if (trait.isValidate()) {
res = validateConstraint(res);
}
if (trait.isRely()) {
res = relyConstraint(res);
}
return res;
}
public static SqlNodeList deepCopyColList(SqlNodeList colList) {
SqlNodeList res = new SqlNodeList(colList.getParserPosition());
for (SqlNode node : colList) {
SqlTableColumn col = (SqlTableColumn) node;
res.add(new SqlTableColumn(
col.getName(),
col.getType(),
col.getConstraint().orElse(null),
col.getComment().orElse(null),
col.getParserPosition()));
}
return res;
}
}
|
Since these are created in the constructor, how will it react to dynamic option modification? (For example, if I remove or add a metrics category, will it re-evaluate the requests timer, etc.?)
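A minimal sketch of the difference, assuming the fields used in the revised class below (client, registry, tags, getEffectiveTags); lazyRequestTimer is a hypothetical helper name. An eagerly built timer freezes whatever categories and options existed at construction, whereas re-reading the options per request reacts to dynamic modification:

Timer lazyRequestTimer() {
    // Re-read the current options on every call, so a category or option
    // changed after construction is honored.
    CosmosMeterOptions options = client.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
    if (!options.isEnabled()) {
        return null;
    }
    // Micrometer's register(...) is idempotent: it returns the existing meter
    // when one with the same name and tags is already registered.
    return Timer.builder(options.getMeterName().toString())
            .tags(getEffectiveTags(tags, options))
            .register(registry);
}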
|
private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
Tags tags = Tags.of(endpoint.clientMetricTag());
this.metricCategories = client.getMetricCategories();
if (metricCategories.contains(MetricCategory.DirectRequests)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMeterName.DIRECT_REQUEST_LATENCY);
if (options.isEnabled()) {
this.requests = Timer
.builder(options.getMeterName().toString())
.description("RNTBD request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(optionsAccessor.getPercentiles(options))
.publishPercentileHistogram(optionsAccessor.isHistogramPublishingEnabled(options))
.tags(getEffectiveTags(tags, options))
.register(registry);
} else {
this.requests = null;
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_REQUEST_LATENCY_FAILED);
if (options.isEnabled()) {
this.responseErrors = Timer
.builder(options.getMeterName().toString())
.description("RNTBD failed request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(optionsAccessor.getPercentiles(options))
.publishPercentileHistogram(optionsAccessor.isHistogramPublishingEnabled(options))
.tags(getEffectiveTags(tags, options))
.register(registry);
} else {
this.responseErrors = null;
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_REQUEST_LATENCY_SUCCESS);
if (options.isEnabled()) {
this.responseSuccesses = Timer
.builder(options.getMeterName().toString())
.description("RNTBD successful request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(optionsAccessor.getPercentiles(options))
.publishPercentileHistogram(optionsAccessor.isHistogramPublishingEnabled(options))
.tags(getEffectiveTags(tags, options))
.register(registry);
} else {
this.responseSuccesses = null;
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_REQUEST_CONCURRENT_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
.description("RNTBD concurrent requests (executing or queued request count)")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_REQUEST_QUEUED_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
.description("RNTBD queued request count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_REQUEST_SIZE_REQUEST);
if (options.isEnabled()) {
this.requestSize = DistributionSummary.builder(options.getMeterName().toString())
.description("RNTBD request size (bytes)")
.baseUnit("bytes")
.tags(getEffectiveTags(tags, options))
.maximumExpectedValue(16_000_000d)
.publishPercentileHistogram(false)
.publishPercentiles()
.register(registry);
} else {
this.requestSize = null;
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_REQUEST_SIZE_RESPONSE);
if (options.isEnabled()) {
this.responseSize = DistributionSummary.builder(options.getMeterName().toString())
.description("RNTBD response size (bytes)")
.baseUnit("bytes")
.tags(getEffectiveTags(tags, options))
.maximumExpectedValue(16_000_000d)
.publishPercentileHistogram(false)
.publishPercentiles()
.register(registry);
} else {
this.responseSize = null;
}
} else {
this.requests = null;
this.responseErrors = null;
this.responseSuccesses = null;
this.requestSize = null;
this.responseSize = null;
}
if (metricCategories.contains(MetricCategory.DirectEndpoints)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMeterName.DIRECT_ENDPOINTS_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
.description("RNTBD endpoint count")
.register(registry);
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_ENDPOINTS_EVICTED);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
client,
RntbdTransportClient::endpointEvictionCount)
.description("RNTBD endpoint eviction count")
.register(registry);
}
}
if (metricCategories.contains(MetricCategory.DirectChannels)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMeterName.DIRECT_CHANNELS_ACQUIRED_COUNT);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
endpoint,
RntbdEndpoint::totalChannelsAcquiredMetric)
.description("RNTBD acquired channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_CHANNELS_CLOSED_COUNT);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
endpoint,
RntbdEndpoint::totalChannelsClosedMetric)
.description("RNTBD closed channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMeterName.DIRECT_CHANNELS_AVAILABLE_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::channelsAvailableMetric)
.description("RNTBD available channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
}
}
|
this.requests = null;
|
private RntbdMetricsV2(MeterRegistry registry, RntbdTransportClient client, RntbdEndpoint endpoint) {
this.tags = Tags.of(endpoint.clientMetricTag(), endpoint.tag());
this.client = client;
this.registry = registry;
if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_CONCURRENT_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::concurrentRequests)
.description("RNTBD concurrent requests (executing or queued request count)")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_QUEUED_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::requestQueueLength)
.description("RNTBD queued request count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
}
if (this.client.getMetricCategories().contains(MetricCategory.DirectEndpoints)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), client, RntbdTransportClient::endpointCount)
.description("RNTBD endpoint count")
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_ENDPOINTS_EVICTED);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
client,
RntbdTransportClient::endpointEvictionCount)
.description("RNTBD endpoint eviction count")
.register(registry);
}
}
if (this.client.getMetricCategories().contains(MetricCategory.DirectChannels)) {
CosmosMeterOptions options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_ACQUIRED_COUNT);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
endpoint,
RntbdEndpoint::totalChannelsAcquiredMetric)
.description("RNTBD acquired channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_CLOSED_COUNT);
if (options.isEnabled()) {
FunctionCounter.builder(
options.getMeterName().toString(),
endpoint,
RntbdEndpoint::totalChannelsClosedMetric)
.description("RNTBD closed channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_CHANNELS_AVAILABLE_COUNT);
if (options.isEnabled()) {
Gauge.builder(options.getMeterName().toString(), endpoint, RntbdEndpoint::channelsAvailableMetric)
.description("RNTBD available channel count")
.tags(getEffectiveTags(tags, options))
.register(registry);
}
}
}
|
class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
private final DistributionSummary requestSize;
private final Timer requests;
private final Timer responseErrors;
private final DistributionSummary responseSize;
private final Timer responseSuccesses;
private final EnumSet<MetricCategory> metricCategories;
public void markComplete(RntbdRequestRecord requestRecord) {
if (this.metricCategories.contains(MetricCategory.DirectRequests)) {
requestRecord.stop(this.requests, requestRecord.isCompletedExceptionally()
? this.responseErrors
: this.responseSuccesses);
if (this.requestSize != null) {
this.requestSize.record(requestRecord.requestLength());
}
if (this.responseSize != null) {
this.responseSize.record(requestRecord.responseLength());
}
} else {
requestRecord.stop();
}
}
}
|
class RntbdMetricsV2 implements RntbdMetricsCompletionRecorder {
private final RntbdTransportClient client;
private final Tags tags;
private final MeterRegistry registry;
public void markComplete(RntbdRequestRecord requestRecord) {
if (this.client.getMetricCategories().contains(MetricCategory.DirectRequests)) {
Timer requests = null;
Timer requestsSuccess = null;
Timer requestsFailed = null;
CosmosMeterOptions options = this.client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY);
if (options.isEnabled()) {
requests = Timer
.builder(options.getMeterName().toString())
.description("RNTBD request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(this.tags, options))
.register(this.registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_FAILED);
if (options.isEnabled()) {
requestsFailed = Timer
.builder(options.getMeterName().toString())
.description("RNTBD failed request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(tags, options))
.register(registry);
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_LATENCY_SUCCESS);
if (options.isEnabled()) {
requestsSuccess = Timer
.builder(options.getMeterName().toString())
.description("RNTBD successful request latency")
.maximumExpectedValue(Duration.ofSeconds(300))
.publishPercentiles(options.getPercentiles())
.publishPercentileHistogram(options.isHistogramPublishingEnabled())
.tags(getEffectiveTags(tags, options))
.register(registry);
}
requestRecord.stop(
requests,
requestRecord.isCompletedExceptionally() ? requestsFailed : requestsSuccess);
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_REQUEST);
if (options.isEnabled()) {
DistributionSummary requestSize = DistributionSummary.builder(options.getMeterName().toString())
.description("RNTBD request size (bytes)")
.baseUnit("bytes")
.tags(getEffectiveTags(tags, options))
.maximumExpectedValue(16_000_000d)
.publishPercentileHistogram(false)
.publishPercentiles()
.register(registry);
requestSize.record(requestRecord.requestLength());
}
options = client
.getMeterOptions(CosmosMetricName.DIRECT_REQUEST_SIZE_RESPONSE);
if (options.isEnabled()) {
DistributionSummary responseSize = DistributionSummary.builder(options.getMeterName().toString())
.description("RNTBD response size (bytes)")
.baseUnit("bytes")
.tags(getEffectiveTags(tags, options))
.maximumExpectedValue(16_000_000d)
.publishPercentileHistogram(false)
.publishPercentiles()
.register(registry);
responseSize.record(requestRecord.responseLength());
}
} else {
requestRecord.stop();
}
}
}
|
Do we need to add another test case to cover the TTL (automatically dropping out-of-date partitions) feature?
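For instance, a minimal sketch of such a case (reusing the existing alterTable helper; the table name, sleep duration, and which partition expires are assumptions) could set a negative dynamic_partition.start and assert that out-of-date partitions are dropped once the dynamic partition scheduler runs:

String ttlStmt = "alter table test.tbl1 set (\n" +
        "'dynamic_partition.enable' = 'true',\n" +
        "'dynamic_partition.time_unit' = 'DAY',\n" +
        "'dynamic_partition.start' = '-3',\n" + // TTL: keep only the last 3 days
        "'dynamic_partition.end' = '3',\n" +
        "'dynamic_partition.prefix' = 'p',\n" +
        "'dynamic_partition.buckets' = '3');";
alterTable(ttlStmt, false);
Thread.sleep(5000); // give the dynamic partition scheduler time to run
Database db = Catalog.getCurrentCatalog().getDb("default_cluster:test");
OlapTable tbl = (OlapTable) db.getTable("tbl1");
Assert.assertNull(tbl.getPartition("p1")); // out-of-date partition was dropped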
|
public void testConflictAlterOperations() throws Exception {
String stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01'), add partition p4 values less than('2020-05-01')";
alterTable(stmt, true);
stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01'), drop partition p4";
alterTable(stmt, true);
stmt = "alter table test.tbl1 drop partition p3, drop partition p4";
alterTable(stmt, true);
stmt = "alter table test.tbl1 drop partition p3, add column k3 int";
alterTable(stmt, true);
stmt = "alter table test.tbl1 add column k3 int, add column k4 int";
alterTable(stmt, false);
waitSchemaChangeJobDone(false);
stmt = "alter table test.tbl1 add rollup r1 (k1)";
alterTable(stmt, false);
waitSchemaChangeJobDone(true);
stmt = "alter table test.tbl1 add rollup r2 (k1), r3 (k1)";
alterTable(stmt, false);
waitSchemaChangeJobDone(true);
stmt = "alter table test.tbl1 set (\n" +
"'dynamic_partition.enable' = 'true',\n" +
"'dynamic_partition.time_unit' = 'DAY',\n" +
"'dynamic_partition.end' = '3',\n" +
"'dynamic_partition.prefix' = 'p',\n" +
"'dynamic_partition.buckets' = '3'\n" +
" );";
alterTable(stmt, false);
Database db = Catalog.getCurrentCatalog().getDb("default_cluster:test");
OlapTable tbl = (OlapTable)db.getTable("tbl1");
Assert.assertTrue(tbl.getTableProperty().getDynamicPartitionProperty().getEnable());
Assert.assertEquals(4, tbl.getIndexIdToSchema().size());
stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')";
alterTable(stmt, true);
stmt = "alter table test.tbl1 add temporary partition tp3 values less than('2020-04-01') distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')";
alterTable(stmt, false);
Assert.assertEquals(1, tbl.getTempPartitions().size());
stmt = "alter table test.tbl1 set ('dynamic_partition.enable' = 'false')";
alterTable(stmt, false);
Assert.assertFalse(tbl.getTableProperty().getDynamicPartitionProperty().getEnable());
stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') distributed by hash(k2) buckets 4";
alterTable(stmt, false);
Assert.assertEquals(Short.valueOf("1"), tbl.getDefaultReplicationNum());
stmt = "alter table test.tbl1 set ('default.replication_num' = '3');";
alterTable(stmt, false);
Assert.assertEquals(Short.valueOf("3"), tbl.getDefaultReplicationNum());
Partition p1 = tbl.getPartition("p1");
Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicationNum(p1.getId())));
stmt = "alter table test.tbl1 set ('replication_num' = '3');";
alterTable(stmt, true);
Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicationNum(p1.getId())));
OlapTable tbl2 = (OlapTable) db.getTable("tbl2");
Partition partition = tbl2.getPartition(tbl2.getName());
Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId())));
stmt = "alter table test.tbl2 set ('replication_num' = '3');";
alterTable(stmt, false);
Assert.assertEquals(Short.valueOf("3"), Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId())));
stmt = "alter table test.tbl1 add partition p4 values less than('2020-05-01')";
alterTable(stmt, true);
stmt = "alter table test.tbl1 add partition p4 values less than('2020-05-01') ('replication_num' = '1')";
alterTable(stmt, false);
}
|
public void testConflictAlterOperations() throws Exception {
String stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01'), add partition p4 values less than('2020-05-01')";
alterTable(stmt, true);
stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01'), drop partition p4";
alterTable(stmt, true);
stmt = "alter table test.tbl1 drop partition p3, drop partition p4";
alterTable(stmt, true);
stmt = "alter table test.tbl1 drop partition p3, add column k3 int";
alterTable(stmt, true);
stmt = "alter table test.tbl1 add column k3 int, add column k4 int";
alterTable(stmt, false);
waitSchemaChangeJobDone(false);
stmt = "alter table test.tbl1 add rollup r1 (k1)";
alterTable(stmt, false);
waitSchemaChangeJobDone(true);
stmt = "alter table test.tbl1 add rollup r2 (k1), r3 (k1)";
alterTable(stmt, false);
waitSchemaChangeJobDone(true);
stmt = "alter table test.tbl1 set (\n" +
"'dynamic_partition.enable' = 'true',\n" +
"'dynamic_partition.time_unit' = 'DAY',\n" +
"'dynamic_partition.end' = '3',\n" +
"'dynamic_partition.prefix' = 'p',\n" +
"'dynamic_partition.buckets' = '3'\n" +
" );";
alterTable(stmt, false);
Database db = Catalog.getCurrentCatalog().getDb("default_cluster:test");
OlapTable tbl = (OlapTable)db.getTable("tbl1");
Assert.assertTrue(tbl.getTableProperty().getDynamicPartitionProperty().getEnable());
Assert.assertEquals(4, tbl.getIndexIdToSchema().size());
stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')";
alterTable(stmt, true);
stmt = "alter table test.tbl1 add temporary partition tp3 values less than('2020-04-01') distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')";
alterTable(stmt, false);
Assert.assertEquals(1, tbl.getTempPartitions().size());
stmt = "alter table test.tbl1 set ('dynamic_partition.enable' = 'false')";
alterTable(stmt, false);
Assert.assertFalse(tbl.getTableProperty().getDynamicPartitionProperty().getEnable());
stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') distributed by hash(k2) buckets 4";
alterTable(stmt, false);
Assert.assertEquals(Short.valueOf("1"), tbl.getDefaultReplicationNum());
stmt = "alter table test.tbl1 set ('default.replication_num' = '3');";
alterTable(stmt, false);
Assert.assertEquals(Short.valueOf("3"), tbl.getDefaultReplicationNum());
Partition p1 = tbl.getPartition("p1");
Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicationNum(p1.getId())));
stmt = "alter table test.tbl1 set ('replication_num' = '3');";
alterTable(stmt, true);
Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicationNum(p1.getId())));
OlapTable tbl2 = (OlapTable) db.getTable("tbl2");
Partition partition = tbl2.getPartition(tbl2.getName());
Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId())));
stmt = "alter table test.tbl2 set ('replication_num' = '3');";
alterTable(stmt, false);
Assert.assertEquals(Short.valueOf("3"), Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId())));
Thread.sleep(5000);
stmt = "alter table test.tbl1 add partition p4 values less than('2020-04-10')";
alterTable(stmt, true);
stmt = "alter table test.tbl1 add partition p4 values less than('2020-04-10') ('replication_num' = '1')";
alterTable(stmt, false);
}
|
class AlterTest {
private static String runningDir = "fe/mocked/AlterTest/" + UUID.randomUUID().toString() + "/";
private static ConnectContext connectContext;
@BeforeClass
public static void beforeClass() throws Exception {
FeConstants.runningUnitTest = true;
FeConstants.default_scheduler_interval_millisecond = 100;
Config.dynamic_partition_enable = true;
UtFrameUtils.createMinDorisCluster(runningDir);
connectContext = UtFrameUtils.createDefaultCtx();
String createDbStmtStr = "create database test;";
CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, connectContext);
Catalog.getCurrentCatalog().createDb(createDbStmt);
createTable("CREATE TABLE test.tbl1\n" +
"(\n" +
" k1 date,\n" +
" k2 int,\n" +
" v1 int sum\n" +
")\n" +
"PARTITION BY RANGE(k1)\n" +
"(\n" +
" PARTITION p1 values less than('2020-02-01'),\n" +
" PARTITION p2 values less than('2020-03-01')\n" +
")\n" +
"DISTRIBUTED BY HASH(k2) BUCKETS 3\n" +
"PROPERTIES('replication_num' = '1');");
createTable("CREATE TABLE test.tbl2\n" +
"(\n" +
" k1 date,\n" +
" v1 int sum\n" +
")\n" +
"DISTRIBUTED BY HASH (k1) BUCKETS 3\n" +
"PROPERTIES('replication_num' = '1');");
}
@AfterClass
public static void tearDown() {
File file = new File(runningDir);
file.delete();
}
private static void createTable(String sql) throws Exception {
CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext);
Catalog.getCurrentCatalog().createTable(createTableStmt);
}
private static void alterTable(String sql, boolean expectedException) throws Exception {
AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext);
try {
Catalog.getCurrentCatalog().alterTable(alterTableStmt);
if (expectedException) {
Assert.fail();
}
} catch (Exception e) {
e.printStackTrace();
if (!expectedException) {
Assert.fail();
}
}
}
private void waitSchemaChangeJobDone(boolean rollupJob) throws InterruptedException {
Map<Long, AlterJobV2> alterJobs = Catalog.getCurrentCatalog().getSchemaChangeHandler().getAlterJobsV2();
if (rollupJob) {
alterJobs = Catalog.getCurrentCatalog().getRollupHandler().getAlterJobsV2();
}
for (AlterJobV2 alterJobV2 : alterJobs.values()) {
while (!alterJobV2.getJobState().isFinalState()) {
System.out.println("alter job " + alterJobV2.getJobId() + " is running. state: " + alterJobV2.getJobState());
Thread.sleep(1000);
}
System.out.println(alterJobV2.getType() + " alter job " + alterJobV2.getJobId() + " is done. state: " + alterJobV2.getJobState());
Assert.assertEquals(AlterJobV2.JobState.FINISHED, alterJobV2.getJobState());
}
}
}
|
class AlterTest {
private static String runningDir = "fe/mocked/AlterTest/" + UUID.randomUUID().toString() + "/";
private static ConnectContext connectContext;
@BeforeClass
public static void beforeClass() throws Exception {
FeConstants.runningUnitTest = true;
FeConstants.default_scheduler_interval_millisecond = 100;
Config.dynamic_partition_enable = true;
Config.dynamic_partition_check_interval_seconds = 1;
UtFrameUtils.createMinDorisCluster(runningDir);
connectContext = UtFrameUtils.createDefaultCtx();
String createDbStmtStr = "create database test;";
CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, connectContext);
Catalog.getCurrentCatalog().createDb(createDbStmt);
createTable("CREATE TABLE test.tbl1\n" +
"(\n" +
" k1 date,\n" +
" k2 int,\n" +
" v1 int sum\n" +
")\n" +
"PARTITION BY RANGE(k1)\n" +
"(\n" +
" PARTITION p1 values less than('2020-02-01'),\n" +
" PARTITION p2 values less than('2020-03-01')\n" +
")\n" +
"DISTRIBUTED BY HASH(k2) BUCKETS 3\n" +
"PROPERTIES('replication_num' = '1');");
createTable("CREATE TABLE test.tbl2\n" +
"(\n" +
" k1 date,\n" +
" v1 int sum\n" +
")\n" +
"DISTRIBUTED BY HASH (k1) BUCKETS 3\n" +
"PROPERTIES('replication_num' = '1');");
createTable("CREATE TABLE test.tbl3\n" +
"(\n" +
" k1 date,\n" +
" k2 int,\n" +
" v1 int sum\n" +
")\n" +
"PARTITION BY RANGE(k1)\n" +
"(\n" +
" PARTITION p1 values less than('2020-02-01'),\n" +
" PARTITION p2 values less than('2020-03-01')\n" +
")\n" +
"DISTRIBUTED BY HASH(k2) BUCKETS 3\n" +
"PROPERTIES('replication_num' = '1');");
}
@AfterClass
public static void tearDown() {
File file = new File(runningDir);
file.delete();
}
private static void createTable(String sql) throws Exception {
CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext);
Catalog.getCurrentCatalog().createTable(createTableStmt);
}
private static void alterTable(String sql, boolean expectedException) throws Exception {
AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, connectContext);
try {
Catalog.getCurrentCatalog().alterTable(alterTableStmt);
if (expectedException) {
Assert.fail();
}
} catch (Exception e) {
e.printStackTrace();
if (!expectedException) {
Assert.fail();
}
}
}
@Test
public void testDynamicPartitionDropAndAdd() throws Exception {
String stmt = "alter table test.tbl3 set (\n" +
"'dynamic_partition.enable' = 'true',\n" +
"'dynamic_partition.time_unit' = 'DAY',\n" +
"'dynamic_partition.start' = '-3',\n" +
"'dynamic_partition.end' = '3',\n" +
"'dynamic_partition.prefix' = 'p',\n" +
"'dynamic_partition.buckets' = '3'\n" +
" );";
alterTable(stmt, false);
Thread.sleep(5000);
Database db = Catalog.getCurrentCatalog().getDb("default_cluster:test");
OlapTable tbl = (OlapTable) db.getTable("tbl3");
Assert.assertEquals(4, tbl.getPartitionNames().size());
Assert.assertNull(tbl.getPartition("p1"));
Assert.assertNull(tbl.getPartition("p2"));
}
private void waitSchemaChangeJobDone(boolean rollupJob) throws InterruptedException {
Map<Long, AlterJobV2> alterJobs = Catalog.getCurrentCatalog().getSchemaChangeHandler().getAlterJobsV2();
if (rollupJob) {
alterJobs = Catalog.getCurrentCatalog().getRollupHandler().getAlterJobsV2();
}
for (AlterJobV2 alterJobV2 : alterJobs.values()) {
while (!alterJobV2.getJobState().isFinalState()) {
System.out.println("alter job " + alterJobV2.getJobId() + " is running. state: " + alterJobV2.getJobState());
Thread.sleep(1000);
}
System.out.println(alterJobV2.getType() + " alter job " + alterJobV2.getJobId() + " is done. state: " + alterJobV2.getJobState());
Assert.assertEquals(AlterJobV2.JobState.FINISHED, alterJobV2.getJobState());
}
}
}
|
|
How about no `deployment.xml` at all?
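That is, build the package from empty bytes so no deployment.xml exists at all, e.g. (mirroring what the revised test below ends up doing):

ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);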
|
void testDevDeployment() {
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(DeploymentSpec.empty.xmlForm());
var context = tester.newDeploymentContext();
ZoneId zone = ZoneId.from("dev", "us-east-1");
tester.controllerTester().zoneRegistry()
.setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
context.runJob(zone, applicationPackage);
assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
"Application deployed and activated");
assertTrue(context.instanceJobs().isEmpty(),
"No job status added");
assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
.asList()
.stream()
.map(Endpoint::routingMethod)
.collect(Collectors.toSet());
assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
tester.controller().applications().deactivate(context.instanceId(), zone);
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
}
|
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(DeploymentSpec.empty.xmlForm());
|
void testDevDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
var context = tester.newDeploymentContext();
ZoneId zone = ZoneId.from("dev", "us-east-1");
tester.controllerTester().zoneRegistry()
.setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);
context.runJob(zone, applicationPackage);
assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
"Application deployed and activated");
assertTrue(context.instanceJobs().isEmpty(),
"No job status added");
assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");
Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
.asList()
.stream()
.map(Endpoint::routingMethod)
.collect(Collectors.toSet());
assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
tester.controller().applications().deactivate(context.instanceId(), zone);
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(new DeploymentId(context.instanceId(), zone))
.get(tester.clock().instant()));
}
|
class ControllerTest {
private final DeploymentTester tester = new DeploymentTester();
@Test
void testDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.explicitEnvironment(Environment.dev, Environment.perf)
.region("us-west-1")
.region("us-east-3")
.build();
Version version1 = tester.configServer().initialVersion();
var context = tester.newDeploymentContext();
context.submit(applicationPackage);
assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
context.application().revisions().get(context.instance().change().revision().get()),
"Application version is known from completion of initial job");
context.runJob(systemTest);
context.runJob(stagingTest);
RevisionId applicationVersion = context.instance().change().revision().get();
assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
tester.triggerJobs();
tester.clock().advance(Duration.ofSeconds(1));
context.timeOutUpgrade(productionUsWest1);
assertEquals(4, context.instanceJobs().size());
tester.triggerJobs();
tester.controllerTester().createNewController();
assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
context.submit(applicationPackage);
context.runJob(systemTest);
context.runJob(stagingTest);
context.triggerJobs().jobAborted(productionUsWest1);
context.runJob(productionUsWest1);
tester.triggerJobs();
context.runJob(productionUsEast3);
assertEquals(4, context.instanceJobs().size());
applicationPackage = new ApplicationPackageBuilder()
.instances("hellO")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.region("deep-space-9")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.region("us-east-3")
.build();
try {
assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
context.submit(applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application 'tenant.application' is deployed in us-west-1, but does not include this zone in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was not removed");
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.region("us-east-3")
.build();
context.submit(applicationPackage);
assertNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was removed");
assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.instanceId().tenant()));
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
assertNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
@Test
void testGlobalRotationStatus() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.build();
context.submit(applicationPackage).deploy();
var deployment1 = context.deploymentIdIn(zone1);
DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1);
RoutingStatus status1 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.in, status1.value());
routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
RoutingStatus status2 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.out, status2.value());
RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus();
assertEquals(RoutingStatus.Value.in, status3.value());
}
@Test
void testDnsUpdatesForGlobalEndpoint() {
var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
ZoneId usWest = ZoneId.from("prod.us-west-1");
ZoneId usCentral = ZoneId.from("prod.us-central-1");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,default")
.endpoint("default", "foo")
.region(usWest.region())
.region(usCentral.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
RoutingMethod.sharedLayer4);
betaContext.submit(applicationPackage).deploy();
{
Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
assertFalse(betaDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("beta.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-01"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : betaDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints()
.get(betaContext.deploymentIdIn(deployment.zone())));
}
betaContext.flushDnsUpdates();
}
{
Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
assertFalse(defaultDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : defaultDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
}
defaultContext.flushDnsUpdates();
}
Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
"app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
rotationCnames.forEach((cname, data) -> {
var record = tester.controllerTester().findCname(cname);
assertTrue(record.isPresent());
assertEquals(cname, record.get().name().asString());
assertEquals(data, record.get().data().asString());
});
Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
.scope(Endpoint.Scope.global)
.asList().stream()
.map(Endpoint::dnsName)
.collect(Collectors.toSet());
assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
});
}
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
for (Deployment deployment : deployments) {
assertEquals(Set.of("rotation-id-01",
"app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
assertEquals(1, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
.scope(Endpoint.Scope.global)
.sortedBy(Comparator.comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
globalDnsNames);
}
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("foobar", "qrs", "us-west-1", "us-central-1")
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.endpoint("all", "qrs")
.endpoint("west", "qrs", "us-west-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
var notWest = Set.of(
"rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
);
var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
for (Deployment deployment : deployments) {
assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
assertEquals(4, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record1.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record2.isPresent());
assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record3.isPresent());
assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString());
assertEquals("rotation-fqdn-03.", record3.get().data().asString());
var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record4.isPresent());
assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString());
assertEquals("rotation-fqdn-04.", record4.get().data().asString());
}
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
var west = ZoneId.from("prod", "us-west-1");
var central = ZoneId.from("prod", "us-central-1");
var east = ZoneId.from("prod", "us-east-3");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Zone " + zone + " is a member of global endpoint");
}
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage2).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Zone " + zone + " is a member of global endpoint");
}
assertEquals(
Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(east)),
"Zone " + east + " is a member of global endpoint");
ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage3).deploy();
for (var zone : List.of(west, central, east)) {
assertEquals(
zone.equals(east)
? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
: Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Zone " + zone + " is a member of global endpoint");
}
ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage4);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
"and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage5);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage6);
}
@Test
void testUnassignRotations() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-central-1")
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage2).deploy();
assertEquals(List.of(), context.instance().rotations());
assertEquals(
Set.of(),
tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1")))
);
}
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
{
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isPresent());
assertEquals(dnsName1, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage);
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.application().id().tenant()));
try (RotationLock lock = tester.controller().routing().rotations().lock()) {
assertTrue(tester.controller().routing().rotations().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")),
"Rotation is unassigned");
}
context.flushDnsUpdates();
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isEmpty(), dnsName1 + " is removed");
}
String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
{
var context = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
var record = tester.controllerTester().findCname(dnsName2);
assertTrue(record.isPresent());
assertEquals(dnsName2, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
assertEquals(2, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname(dnsName1);
assertTrue(record1.isPresent());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname(dnsName2);
assertTrue(record2.isPresent());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
}
}
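// Application-scoped endpoints distribute traffic across instances with explicit per-region weights.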
@Test
void testDnsUpdatesForApplicationEndpoint() {
ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
var context = tester.newDeploymentContext(beta);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region("us-west-1")
.region("us-east-3")
.applicationEndpoint("a", "default", "us-west-1",
Map.of(beta.instance(), 2,
main.instance(), 8))
.applicationEndpoint("b", "default", "us-west-1",
Map.of(beta.instance(), 1,
main.instance(), 1))
.applicationEndpoint("c", "default", "us-east-3",
Map.of(beta.instance(), 4,
main.instance(), 6))
.build();
context.submit(applicationPackage).deploy();
ZoneId usWest = ZoneId.from("prod", "us-west-1");
ZoneId usEast = ZoneId.from("prod", "us-east-3");
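// Expected endpoint-name -> weight mapping that must reach the config server for each deployment.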
Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2,
"b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1),
new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8,
"b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1),
new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4),
new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6)
);
deploymentEndpoints.forEach((deployment, endpoints) -> {
Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
.map(kv -> new ContainerEndpoint("default", "application",
List.of(kv.getKey()),
OptionalInt.of(kv.getValue()),
RoutingMethod.sharedLayer4))
.collect(Collectors.toSet());
assertEquals(expected,
tester.configServer().containerEndpoints().get(deployment),
"Endpoint names for " + deployment + " are passed to config server");
});
context.flushDnsUpdates();
Set<Record> records = tester.controllerTester().nameService().records();
assertEquals(Set.of(new Record(Record.Type.CNAME,
RecordName.from("a.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("b.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("c.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3."))),
records);
List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
.scope(Endpoint.Scope.application)
.mapToList(Endpoint::dnsName);
assertEquals(List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud",
"b.app1.tenant1.us-west-1-r.vespa.oath.cloud",
"c.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
endpointDnsNames);
}
@Test
void testDevDeploymentWithIncompatibleVersions() {
Version version1 = new Version("7");
Version version2 = new Version("7.5");
Version version3 = new Version("8");
var context = tester.newDeploymentContext();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
tester.controllerTester().upgradeSystem(version2);
ZoneId zone = ZoneId.from("dev", "us-east-1");
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
assertEquals(version2, context.deployment(zone).version());
assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major that does not yet exist");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
}
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms are compatible with compile version 8", e.getMessage());
}
tester.controllerTester().upgradeSystem(version3);
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major which is incompatible with compile version");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
}
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
@Test
void testSuspension() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-east-3")
.build();
context.submit(applicationPackage).deploy();
DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
assertFalse(tester.configServer().isSuspended(deployment1));
assertFalse(tester.configServer().isSuspended(deployment2));
tester.configServer().setSuspension(deployment1, true);
assertTrue(tester.configServer().isSuspended(deployment1));
assertFalse(tester.configServer().isSuspended(deployment2));
}
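// Deactivating an already-deactivated deployment must be a no-op, not an error.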
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
context.submit(applicationPackage).runJob(zone, applicationPackage);
tester.controller().applications().deactivate(context.instanceId(), zone);
tester.controller().applications().deactivate(context.instanceId(), zone);
}
@Test
void testDeployApplicationWithWarnings() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
ZoneId zone = ZoneId.from("prod", "us-west-1");
int warnings = 3;
tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings);
context.submit(applicationPackage).deploy();
assertEquals(warnings, context.deployment(zone)
.metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
}
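// Certificates are provisioned on first prod submission, reused on resubmission, and also provisioned for dev deployments.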
@Test
void testDeploySelectivelyProvisionsCertificate() {
Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
var prodZone = ZoneId.from("prod", "us-west-1");
var stagingZone = ZoneId.from("staging", "us-east-3");
var testZone = ZoneId.from("test", "us-east-1");
tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.region(prodZone.region())
.build();
context1.submit(applicationPackage).deploy();
var cert = certificate.apply(context1.instance());
assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
"app1.tenant1.global.vespa.oath.cloud",
"*.app1.tenant1.global.vespa.oath.cloud"),
Stream.of(prodZone, testZone, stagingZone)
.flatMap(zone -> Stream.of("", "*.")
.map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
(zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
".vespa.oath.cloud")))
.collect(Collectors.toUnmodifiableSet()),
Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
context1.submit(applicationPackage).deploy();
assertEquals(cert, certificate.apply(context1.instance()));
var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
var devZone = ZoneId.from("dev", "us-east-1");
context2.runJob(devZone, applicationPackage);
assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
"Application deployed and activated");
assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
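// A single global endpoint may not span regions in different clouds.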
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
tester.controllerTester().zoneRegistry().setZones(
ZoneApiMock.fromId("test.us-west-1"),
ZoneApiMock.fromId("staging.us-west-1"),
ZoneApiMock.fromId("prod.us-west-1"),
ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
);
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.region("aws-us-east-1")
.region("us-west-1")
.endpoint("default", "default")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
var applicationPackage2 = new ApplicationPackageBuilder()
.region("aws-us-east-1")
.region("us-west-1")
.endpoint("aws", "default", "aws-us-east-1")
.endpoint("foo", "default", "aws-us-east-1", "us-west-1")
.build();
try {
context.submit(applicationPackage2);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
}
@Test
void testDeployWithGlobalEndpointsInGcp() {
tester.controllerTester().zoneRegistry().setZones(
ZoneApiMock.fromId("test.us-west-1"),
ZoneApiMock.fromId("staging.us-west-1"),
ZoneApiMock.newBuilder().with(CloudName.GCP).withId("prod.gcp-us-east1-b").build()
);
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.region("gcp-us-east1-b")
.endpoint("default", "default")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'default' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage());
}
var applicationPackage2 = new ApplicationPackageBuilder()
.region("gcp-us-east1-b")
.endpoint("gcp", "default", "gcp-us-east1-b")
.build();
try {
context.submit(applicationPackage2);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'gcp' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage());
}
}
@Test
void testDeployWithoutSourceRevision() {
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.region("us-west-1")
.build();
context.submit(applicationPackage, Optional.empty())
.deploy();
assertEquals(1, context.instance().deployments().size(), "Deployed application");
}
@Test
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.endpoint("east", "default", zone2.region().value())
.region(zone1.region())
.region(zone2.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
context.submit(applicationPackage).deploy();
var expectedRecords = List.of(
new Record(Record.Type.ALIAS,
RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
"dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
new Record(Record.Type.ALIAS,
RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
"dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
new Record(Record.Type.CNAME,
RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
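// With exclusive (direct) routing, declared endpoint names are passed per zone to the config server without rotations.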
@Test
void testDeploymentDirectRouting() {
DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var zone3 = ZoneId.from("prod", "eu-west-1");
tester.controllerTester().zoneRegistry()
.exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
var applicationPackageBuilder = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.region(zone3.region())
.endpoint("default", "default")
.endpoint("foo", "qrs")
.endpoint("us", "default", zone1.region().value(), zone2.region().value())
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
context.submit(applicationPackageBuilder.build()).deploy();
for (var zone : List.of(zone1, zone2)) {
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud",
"us.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Expected container endpoints in " + zone);
}
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
"Expected container endpoints in " + zone3);
}
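// Moving a global endpoint to another cluster requires the global-endpoint-change validation override.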
@Test
void testChangeEndpointCluster() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region(west.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
"'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
"deployment.xml. Deploying given deployment.xml will remove " +
"[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
"[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
"<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
"https:
}
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
}
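// readable() skips applications whose stored data cannot be deserialized, while asList() fails on them.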
@Test
void testReadableApplications() {
var db = new MockCuratorDb(tester.controller().system());
var tester = new DeploymentTester(new ControllerTester(db));
var app1 = tester.newDeploymentContext("t1", "a1", "default")
.submit()
.deploy();
var app2 = tester.newDeploymentContext("t2", "a2", "default")
.submit()
.deploy();
assertEquals(2, tester.applications().readable().size());
db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
new byte[]{(byte) 0xDE, (byte) 0xAD});
assertEquals(1, tester.applications().readable().size());
try {
tester.applications().asList();
fail("Expected exception");
} catch (Exception ignored) {
}
app1.submit().deploy();
}
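// Endpoint ID and instance name combinations that would produce the same endpoint name are rejected.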
@Test
void testClashingEndpointIdAndInstanceName() {
String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
" <instance id=\"dev\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
try {
tester.newDeploymentContext().submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
e.getMessage());
}
}
@Test
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
" <prod>\n" +
" <region>us-west-1</region>\n" +
" </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
"see https:
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
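// Compile version selection across confidence changes, major-version pins and the incompatible-versions boundary.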
@Test
void testCompileVersion() {
DeploymentContext context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().overrideConfidence(version0, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
context.submit(applicationPackage).deploy();
Version version1 = Version.fromString("7.2");
tester.controllerTester().upgradeSystem(version1);
tester.upgrader().overrideConfidence(version1, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().maintain();
context.deployPlatform(version1);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");
Version version2 = Version.fromString("8.0");
tester.controllerTester().upgradeSystem(version2);
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
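// Declare major 8 incompatible with 7: version selection must now stay on the application's side of the boundary.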
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.broken);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals("no suitable, released compile version exists",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.empty()))
.getMessage());
assertEquals("no suitable, released compile version exists for specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
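// Deploying to a custom cloud account requires the account to be allowed for the tenant and configured in every targeted zone.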
@Test
void testCloudAccount() {
DeploymentContext context = tester.newDeploymentContext();
ZoneId devZone = devUsEast1.zone();
ZoneId prodZone = productionUsWest1.zone();
String cloudAccount = "012345678912";
var applicationPackage = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone.region())
.build();
context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'");
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
.abortJob(stagingTest);
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone);
context.submit(applicationPackage).deploy();
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone);
context.runJob(devZone, applicationPackage);
for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
assertEquals(cloudAccount, tester.controllerTester().configServer()
.cloudAccount(context.deploymentIdIn(zoneId))
.get().value());
}
}
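// Attributes deprecated on an earlier major are rejected for packages compiled against the next major.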
@Test
void testSubmitWithElementDeprecatedOnPreviousMajor() {
DeploymentContext context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.compileVersion(Version.fromString("8.1"))
.region("us-west-1")
.globalServiceId("qrs")
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
}
}
}
|
class ControllerTest {
private final DeploymentTester tester = new DeploymentTester();
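// Full deployment round trip: revision tracking, job runs, controller restart, and rejection of illegal deployment specs.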
@Test
void testDeployment() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.explicitEnvironment(Environment.dev, Environment.perf)
.region("us-west-1")
.region("us-east-3")
.build();
Version version1 = tester.configServer().initialVersion();
var context = tester.newDeploymentContext();
context.submit(applicationPackage);
assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)),
context.application().revisions().get(context.instance().change().revision().get()),
"Application version is known from completion of initial job");
context.runJob(systemTest);
context.runJob(stagingTest);
RevisionId applicationVersion = context.instance().change().revision().get();
assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment");
tester.triggerJobs();
tester.clock().advance(Duration.ofSeconds(1));
context.timeOutUpgrade(productionUsWest1);
assertEquals(4, context.instanceJobs().size());
tester.triggerJobs();
tester.controllerTester().createNewController();
assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
assertNotNull(tester.controller().applications().requireInstance(context.instanceId()));
context.submit(applicationPackage);
context.runJob(systemTest);
context.runJob(stagingTest);
context.triggerJobs().jobAborted(productionUsWest1);
context.runJob(productionUsWest1);
tester.triggerJobs();
context.runJob(productionUsEast3);
assertEquals(4, context.instanceJobs().size());
applicationPackage = new ApplicationPackageBuilder()
.instances("hellO")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Invalid id 'hellO'. Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.region("deep-space-9")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception due to illegal deployment spec.");
}
catch (IllegalArgumentException e) {
assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage());
}
applicationPackage = new ApplicationPackageBuilder()
.region("us-east-3")
.build();
try {
assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1")));
context.submit(applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
assertEquals("deployment-removal: application 'tenant.application' is deployed in us-west-1, but does not include this zone in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval),
e.getMessage());
}
assertNotNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was not removed");
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.upgradePolicy("default")
.region("us-east-3")
.build();
context.submit(applicationPackage);
assertNull(context.instance().deployments().get(productionUsWest1.zone()),
"Zone was removed");
assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed");
assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.clock().advance(Duration.ofSeconds(1));
context.submit(ApplicationPackage.deploymentRemoval());
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.instanceId().tenant()));
assertArrayEquals(new byte[0],
tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.instanceId())
.get(tester.clock().instant()));
assertNull(tester.controllerTester().serviceRegistry().applicationStore()
.getMeta(context.deploymentIdIn(productionUsWest1.zone())));
}
@Test
void testGlobalRotationStatus() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.build();
context.submit(applicationPackage).deploy();
var deployment1 = context.deploymentIdIn(zone1);
DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1);
RoutingStatus status1 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.in, status1.value());
routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
RoutingStatus status2 = routingContext.routingStatus();
assertEquals(RoutingStatus.Value.out, status2.value());
RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus();
assertEquals(RoutingStatus.Value.in, status3.value());
}
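// Each instance's global endpoint gets its own rotation, pushed to the config server and published as a CNAME.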
@Test
void testDnsUpdatesForGlobalEndpoint() {
var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
ZoneId usWest = ZoneId.from("prod.us-west-1");
ZoneId usCentral = ZoneId.from("prod.us-central-1");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,default")
.endpoint("default", "foo")
.region(usWest.region())
.region(usCentral.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
RoutingMethod.sharedLayer4);
betaContext.submit(applicationPackage).deploy();
{
Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
assertFalse(betaDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("beta.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-01"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : betaDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints()
.get(betaContext.deploymentIdIn(deployment.zone())));
}
betaContext.flushDnsUpdates();
}
{
Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
assertFalse(defaultDeployments.isEmpty());
Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo",
"global",
List.of("app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02"),
OptionalInt.empty(),
RoutingMethod.sharedLayer4));
for (Deployment deployment : defaultDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
}
defaultContext.flushDnsUpdates();
}
Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
"app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
rotationCnames.forEach((cname, data) -> {
var record = tester.controllerTester().findCname(cname);
assertTrue(record.isPresent());
assertEquals(cname, record.get().name().asString());
assertEquals(data, record.get().data().asString());
});
Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
.scope(Endpoint.Scope.global)
.asList().stream()
.map(Endpoint::dnsName)
.collect(Collectors.toSet());
assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
});
}
@Test
void testDnsUpdatesForGlobalEndpointLegacySyntax() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.globalServiceId("foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
for (Deployment deployment : deployments) {
assertEquals(Set.of("rotation-id-01",
"app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
assertEquals(1, tester.controllerTester().nameService().records().size());
Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
.scope(Endpoint.Scope.global)
.sortedBy(Comparator.comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"),
globalDnsNames);
}
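// Multiple global endpoints each get a dedicated rotation; region-scoped endpoints apply only in their listed regions.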
@Test
void testDnsUpdatesForMultipleGlobalEndpoints() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("foobar", "qrs", "us-west-1", "us-central-1")
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.endpoint("all", "qrs")
.endpoint("west", "qrs", "us-west-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
Collection<Deployment> deployments = context.instance().deployments().values();
assertFalse(deployments.isEmpty());
var notWest = Set.of(
"rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
);
var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
for (Deployment deployment : deployments) {
assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
"Rotation names are passed to config server in " + deployment.zone());
}
context.flushDnsUpdates();
assertEquals(4, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
assertTrue(record1.isPresent());
assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record2.isPresent());
assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record3.isPresent());
assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString());
assertEquals("rotation-fqdn-03.", record3.get().data().asString());
var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud");
assertTrue(record4.isPresent());
assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString());
assertEquals("rotation-fqdn-04.", record4.get().data().asString());
}
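// Growing, shrinking and removing global endpoints; destructive changes require the global-endpoint-change override.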
@Test
void testDnsUpdatesForGlobalEndpointChanges() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
var west = ZoneId.from("prod", "us-west-1");
var central = ZoneId.from("prod", "us-central-1");
var east = ZoneId.from("prod", "us-east-3");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Zone " + zone + " is a member of global endpoint");
}
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage2).deploy();
for (var zone : List.of(west, central)) {
assertEquals(
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Zone " + zone + " is a member of global endpoint");
}
assertEquals(
Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(east)),
"Zone " + east + " is a member of global endpoint");
ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage3).deploy();
for (var zone : List.of(west, central, east)) {
assertEquals(
zone.equals(east)
? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
: Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Zone " + zone + " is a member of global endpoint");
}
ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
.endpoint("default", "qrs", west.region().value(), central.region().value())
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage4);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
"and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage5);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
"[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
"but does not include all of these in deployment.xml. Deploying given deployment.xml " +
"will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage());
}
ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
.endpoint("east", "qrs", east.region().value())
.region(west.region().value())
.region(central.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage6);
}
@Test
void testUnassignRotations() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "qrs", "us-west-1", "us-central-1")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-central-1")
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage2).deploy();
assertEquals(List.of(), context.instance().rotations());
assertEquals(
Set.of(),
tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1")))
);
}
@Test
void testDnsUpdatesWithChangeInRotationAssignment() {
String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
{
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isPresent());
assertEquals(dnsName1, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
applicationPackage = new ApplicationPackageBuilder()
.allow(ValidationId.deploymentRemoval)
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage);
tester.applications().deleteApplication(context.application().id(),
tester.controllerTester().credentialsFor(context.application().id().tenant()));
try (RotationLock lock = tester.controller().routing().rotations().lock()) {
assertTrue(tester.controller().routing().rotations().availableRotations(lock)
.containsKey(new RotationId("rotation-id-01")),
"Rotation is unassigned");
}
context.flushDnsUpdates();
Optional<Record> record = tester.controllerTester().findCname(dnsName1);
assertTrue(record.isEmpty(), dnsName1 + " is removed");
}
String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
{
var context = tester.newDeploymentContext("tenant2", "app2", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals(1, tester.controllerTester().nameService().records().size());
var record = tester.controllerTester().findCname(dnsName2);
assertTrue(record.isPresent());
assertEquals(dnsName2, record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
}
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region("us-west-1")
.region("us-central-1")
.build();
context.submit(applicationPackage).deploy();
assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
assertEquals(2, tester.controllerTester().nameService().records().size());
var record1 = tester.controllerTester().findCname(dnsName1);
assertTrue(record1.isPresent());
assertEquals("rotation-fqdn-02.", record1.get().data().asString());
var record2 = tester.controllerTester().findCname(dnsName2);
assertTrue(record2.isPresent());
assertEquals("rotation-fqdn-01.", record2.get().data().asString());
}
}
@Test
void testDnsUpdatesForApplicationEndpoint() {
ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
var context = tester.newDeploymentContext(beta);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("beta,main")
.region("us-west-1")
.region("us-east-3")
.applicationEndpoint("a", "default", "us-west-1",
Map.of(beta.instance(), 2,
main.instance(), 8))
.applicationEndpoint("b", "default", "us-west-1",
Map.of(beta.instance(), 1,
main.instance(), 1))
.applicationEndpoint("c", "default", "us-east-3",
Map.of(beta.instance(), 4,
main.instance(), 6))
.build();
context.submit(applicationPackage).deploy();
ZoneId usWest = ZoneId.from("prod", "us-west-1");
ZoneId usEast = ZoneId.from("prod", "us-east-3");
Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2,
"b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1),
new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8,
"b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1),
new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4),
new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6)
);
deploymentEndpoints.forEach((deployment, endpoints) -> {
Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
.map(kv -> new ContainerEndpoint("default", "application",
List.of(kv.getKey()),
OptionalInt.of(kv.getValue()),
RoutingMethod.sharedLayer4))
.collect(Collectors.toSet());
assertEquals(expected,
tester.configServer().containerEndpoints().get(deployment),
"Endpoint names for " + deployment + " are passed to config server");
});
context.flushDnsUpdates();
Set<Record> records = tester.controllerTester().nameService().records();
assertEquals(Set.of(new Record(Record.Type.CNAME,
RecordName.from("a.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("b.app1.tenant1.us-west-1-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-west-1.")),
new Record(Record.Type.CNAME,
RecordName.from("c.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3."))),
records);
List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
.scope(Endpoint.Scope.application)
.mapToList(Endpoint::dnsName);
assertEquals(List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud",
"b.app1.tenant1.us-west-1-r.vespa.oath.cloud",
"c.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
endpointDnsNames);
}
@Test
void testDevDeploymentWithIncompatibleVersions() {
Version version1 = new Version("7");
Version version2 = new Version("7.5");
Version version3 = new Version("8");
var context = tester.newDeploymentContext();
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
tester.controllerTester().upgradeSystem(version2);
ZoneId zone = ZoneId.from("dev", "us-east-1");
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
assertEquals(version2, context.deployment(zone).version());
assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major that does not yet exist");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
}
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms are compatible with compile version 8", e.getMessage());
}
tester.controllerTester().upgradeSystem(version3);
try {
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
fail("Should fail when specifying a major which is incompatible with compile version");
}
catch (IllegalArgumentException e) {
assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
}
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}
@Test
void testSuspension() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.region("us-east-3")
.build();
context.submit(applicationPackage).deploy();
DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
assertFalse(tester.configServer().isSuspended(deployment1));
assertFalse(tester.configServer().isSuspended(deployment2));
tester.configServer().setSuspension(deployment1, true);
assertTrue(tester.configServer().isSuspended(deployment1));
assertFalse(tester.configServer().isSuspended(deployment2));
}
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
context.submit(applicationPackage).runJob(zone, applicationPackage);
tester.controller().applications().deactivate(context.instanceId(), zone);
tester.controller().applications().deactivate(context.instanceId(), zone);
}
@Test
void testDeployApplicationWithWarnings() {
var context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
ZoneId zone = ZoneId.from("prod", "us-west-1");
int warnings = 3;
tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings);
context.submit(applicationPackage).deploy();
assertEquals(warnings, context.deployment(zone)
.metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
}
@Test
void testDeploySelectivelyProvisionsCertificate() {
Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());
var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
var prodZone = ZoneId.from("prod", "us-west-1");
var stagingZone = ZoneId.from("staging", "us-east-3");
var testZone = ZoneId.from("test", "us-east-1");
tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.region(prodZone.region())
.build();
context1.submit(applicationPackage).deploy();
var cert = certificate.apply(context1.instance());
assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
"app1.tenant1.global.vespa.oath.cloud",
"*.app1.tenant1.global.vespa.oath.cloud"),
Stream.of(prodZone, testZone, stagingZone)
.flatMap(zone -> Stream.of("", "*.")
.map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
(zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
".vespa.oath.cloud")))
.collect(Collectors.toUnmodifiableSet()),
Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));
context1.submit(applicationPackage).deploy();
assertEquals(cert, certificate.apply(context1.instance()));
var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
var devZone = ZoneId.from("dev", "us-east-1");
context2.runJob(devZone, applicationPackage);
assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
"Application deployed and activated");
assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
}
@Test
void testDeployWithGlobalEndpointsInMultipleClouds() {
tester.controllerTester().zoneRegistry().setZones(
ZoneApiMock.fromId("test.us-west-1"),
ZoneApiMock.fromId("staging.us-west-1"),
ZoneApiMock.fromId("prod.us-west-1"),
ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
);
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.region("aws-us-east-1")
.region("us-west-1")
.endpoint("default", "default")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
var applicationPackage2 = new ApplicationPackageBuilder()
.region("aws-us-east-1")
.region("us-west-1")
.endpoint("aws", "default", "aws-us-east-1")
.endpoint("foo", "default", "aws-us-east-1", "us-west-1")
.build();
try {
context.submit(applicationPackage2);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
}
}
@Test
void testDeployWithGlobalEndpointsInGcp() {
tester.controllerTester().zoneRegistry().setZones(
ZoneApiMock.fromId("test.us-west-1"),
ZoneApiMock.fromId("staging.us-west-1"),
ZoneApiMock.newBuilder().with(CloudName.GCP).withId("prod.gcp-us-east1-b").build()
);
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.region("gcp-us-east1-b")
.endpoint("default", "default")
.build();
try {
context.submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'default' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage());
}
var applicationPackage2 = new ApplicationPackageBuilder()
.region("gcp-us-east1-b")
.endpoint("gcp", "default", "gcp-us-east1-b")
.build();
try {
context.submit(applicationPackage2);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint 'gcp' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage());
}
}
@Test
void testDeployWithoutSourceRevision() {
var context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.region("us-west-1")
.build();
context.submit(applicationPackage, Optional.empty())
.deploy();
assertEquals(1, context.instance().deployments().size(), "Deployed application");
}
@Test
void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
.endpoint("default", "default", zone1.region().value(), zone2.region().value())
.endpoint("east", "default", zone2.region().value())
.region(zone1.region())
.region(zone2.region())
.build();
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
context.submit(applicationPackage).deploy();
var expectedRecords = List.of(
new Record(Record.Type.ALIAS,
RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
"dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
new Record(Record.Type.ALIAS,
RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
"dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
new Record(Record.Type.CNAME,
RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
}
@Test
void testDeploymentDirectRouting() {
DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
var zone3 = ZoneId.from("prod", "eu-west-1");
tester.controllerTester().zoneRegistry()
.exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
var applicationPackageBuilder = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.region(zone3.region())
.endpoint("default", "default")
.endpoint("foo", "qrs")
.endpoint("us", "default", zone1.region().value(), zone2.region().value())
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
context.submit(applicationPackageBuilder.build()).deploy();
for (var zone : List.of(zone1, zone2)) {
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud",
"us.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
"Expected container endpoints in " + zone);
}
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
"foo.application.tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
"Expected container endpoints in " + zone3);
}
@Test
void testChangeEndpointCluster() {
var context = tester.newDeploymentContext();
var west = ZoneId.from("prod", "us-west-1");
var east = ZoneId.from("prod", "us-east-3");
var applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
.region(west.region().value())
.region(east.region().value())
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
"'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
"deployment.xml. Deploying given deployment.xml will remove " +
"[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
"[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
"<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
"https:
}
applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "bar")
.region(west.region().value())
.region(east.region().value())
.allow(ValidationId.globalEndpointChange)
.build();
context.submit(applicationPackage).deploy();
assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
.rotations().get(0).clusterId());
}
@Test
void testReadableApplications() {
var db = new MockCuratorDb(tester.controller().system());
var tester = new DeploymentTester(new ControllerTester(db));
var app1 = tester.newDeploymentContext("t1", "a1", "default")
.submit()
.deploy();
var app2 = tester.newDeploymentContext("t2", "a2", "default")
.submit()
.deploy();
assertEquals(2, tester.applications().readable().size());
db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
new byte[]{(byte) 0xDE, (byte) 0xAD});
assertEquals(1, tester.applications().readable().size());
try {
tester.applications().asList();
fail("Expected exception");
} catch (Exception ignored) {
}
app1.submit().deploy();
}
@Test
void testClashingEndpointIdAndInstanceName() {
String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
" <instance id=\"default\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
" <instance id=\"dev\">\n" +
" <prod>\n" +
" <region active=\"true\">us-west-1</region>\n" +
" </prod>\n" +
" <endpoints>\n" +
" <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
" </endpoints>\n" +
" </instance>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
try {
tester.newDeploymentContext().submit(applicationPackage);
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'",
e.getMessage());
}
}
@Test
void testTestPackageWarnings() {
String deploymentXml = "<deployment version='1.0'>\n" +
" <prod>\n" +
" <region>us-west-1</region>\n" +
" </prod>\n" +
"</deployment>\n";
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
NotificationSource.from(app.application().id()),
List.of("test package has staging tests, so it should also include staging setup",
"see https:
tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
}
@Test
void testCompileVersion() {
DeploymentContext context = tester.newDeploymentContext();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().overrideConfidence(version0, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
context.submit(applicationPackage).deploy();
Version version1 = Version.fromString("7.2");
tester.controllerTester().upgradeSystem(version1);
tester.upgrader().overrideConfidence(version1, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().maintain();
context.deployPlatform(version1);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy();
TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app");
Version version2 = Version.fromString("8.0");
tester.controllerTester().upgradeSystem(version2);
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals("this system has no available versions on specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.normal);
tester.controllerTester().computeVersionStatus();
context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.broken);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals("no suitable, released compile version exists",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.empty()))
.getMessage());
assertEquals("no suitable, released compile version exists for specified major: 8",
assertThrows(IllegalArgumentException.class,
() -> tester.applications().compileVersion(application, OptionalInt.of(8)))
.getMessage());
tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class);
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8)));
tester.upgrader().overrideConfidence(version2, Confidence.low);
tester.controllerTester().computeVersionStatus();
assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7)));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty()));
assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8)));
}
@Test
void testCloudAccount() {
DeploymentContext context = tester.newDeploymentContext();
ZoneId devZone = devUsEast1.zone();
ZoneId prodZone = productionUsWest1.zone();
String cloudAccount = "012345678912";
var applicationPackage = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone.region())
.build();
context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'");
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'")
.abortJob(stagingTest);
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount),
systemTest.zone(),
stagingTest.zone(),
prodZone);
context.submit(applicationPackage).deploy();
tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone);
context.runJob(devZone, applicationPackage);
for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
assertEquals(cloudAccount, tester.controllerTester().configServer()
.cloudAccount(context.deploymentIdIn(zoneId))
.get().value());
}
}
@Test
void testSubmitWithElementDeprecatedOnPreviousMajor() {
DeploymentContext context = tester.newDeploymentContext();
var applicationPackage = new ApplicationPackageBuilder()
.compileVersion(Version.fromString("8.1"))
.region("us-west-1")
.globalServiceId("qrs")
.build();
try {
context.submit(applicationPackage).deploy();
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7"));
}
}
}
|
This is an ancillary performance question: does the Java proxy interface pass the Object[] in a consistent order? I'm wondering if there could be an optimization here.
|
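A hedged aside on this question: java.lang.reflect.Proxy hands InvocationHandler.invoke the arguments in the method's declared parameter order, so the position of a RequestOptions argument is fixed for a given Swagger method. The sketch below shows the optimization that stability would allow, resolving the index once at parse time instead of scanning the Object[] on every call; RequestOptionsLocator is a hypothetical name, not an azure-core type.

import com.azure.core.http.rest.RequestOptions;

import java.lang.reflect.Method;

// Hypothetical sketch (not part of azure-core): resolve the RequestOptions
// parameter position once per Swagger method, instead of scanning the
// Object[] on every proxy invocation.
final class RequestOptionsLocator {
    private final int requestOptionsIndex; // -1 when the method declares no RequestOptions parameter

    RequestOptionsLocator(Method swaggerMethod) {
        int index = -1;
        Class<?>[] parameterTypes = swaggerMethod.getParameterTypes();
        for (int i = 0; i < parameterTypes.length; i++) {
            // java.lang.reflect.Proxy passes arguments in declaration order,
            // so this index is stable across every call to the same method.
            if (RequestOptions.class.isAssignableFrom(parameterTypes[i])) {
                index = i;
                break;
            }
        }
        this.requestOptionsIndex = index;
    }

    RequestOptions find(Object[] swaggerMethodArguments) {
        return (requestOptionsIndex >= 0
                && swaggerMethodArguments != null
                && requestOptionsIndex < swaggerMethodArguments.length)
            ? (RequestOptions) swaggerMethodArguments[requestOptionsIndex]
            : null;
    }
}

One caveat with this sketch: matching on the declared parameter type is stricter than CoreUtils.findFirstOfType, which checks the runtime type of each argument, so the per-call scan would still be needed as a fallback for arguments declared as a supertype.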
public RequestOptions setRequestOptions(Object[] swaggerMethodArguments) {
return CoreUtils.findFirstOfType(swaggerMethodArguments, RequestOptions.class);
}
|
return CoreUtils.findFirstOfType(swaggerMethodArguments, RequestOptions.class);
|
public RequestOptions setRequestOptions(Object[] swaggerMethodArguments) {
return CoreUtils.findFirstOfType(swaggerMethodArguments, RequestOptions.class);
}
|
class SwaggerMethodParser implements HttpResponseDecodeData {
    private static final Pattern PATTERN_COLON_SLASH_SLASH = Pattern.compile("://");
private final SerializerAdapter serializer;
private final String rawHost;
private final String fullyQualifiedMethodName;
private final HttpMethod httpMethod;
private final String relativePath;
private final List<Substitution> hostSubstitutions = new ArrayList<>();
private final List<Substitution> pathSubstitutions = new ArrayList<>();
private final List<Substitution> querySubstitutions = new ArrayList<>();
private final List<Substitution> formSubstitutions = new ArrayList<>();
private final List<Substitution> headerSubstitutions = new ArrayList<>();
private final HttpHeaders headers = new HttpHeaders();
private final Integer bodyContentMethodParameterIndex;
private final String bodyContentType;
private final Type bodyJavaType;
private final BitSet expectedStatusCodes;
private final Type returnType;
private final Type returnValueWireType;
private final UnexpectedResponseExceptionType[] unexpectedResponseExceptionTypes;
private Map<Integer, UnexpectedExceptionInformation> exceptionMapping;
private UnexpectedExceptionInformation defaultException;
/**
* Create a SwaggerMethodParser object using the provided fully qualified method name.
*
* @param swaggerMethod the Swagger method to parse.
* @param rawHost the raw host value from the @Host annotation. Before this can be used as the host value in an HTTP
* request, it must be processed through the possible host substitutions.
*/
SwaggerMethodParser(Method swaggerMethod, String rawHost) {
this(swaggerMethod, rawHost, JacksonAdapter.createDefaultSerializerAdapter());
}
SwaggerMethodParser(Method swaggerMethod, String rawHost, SerializerAdapter serializer) {
this.serializer = serializer;
this.rawHost = rawHost;
final Class<?> swaggerInterface = swaggerMethod.getDeclaringClass();
fullyQualifiedMethodName = swaggerInterface.getName() + "." + swaggerMethod.getName();
if (swaggerMethod.isAnnotationPresent(Get.class)) {
this.httpMethod = HttpMethod.GET;
this.relativePath = swaggerMethod.getAnnotation(Get.class).value();
} else if (swaggerMethod.isAnnotationPresent(Put.class)) {
this.httpMethod = HttpMethod.PUT;
this.relativePath = swaggerMethod.getAnnotation(Put.class).value();
} else if (swaggerMethod.isAnnotationPresent(Head.class)) {
this.httpMethod = HttpMethod.HEAD;
this.relativePath = swaggerMethod.getAnnotation(Head.class).value();
} else if (swaggerMethod.isAnnotationPresent(Delete.class)) {
this.httpMethod = HttpMethod.DELETE;
this.relativePath = swaggerMethod.getAnnotation(Delete.class).value();
} else if (swaggerMethod.isAnnotationPresent(Post.class)) {
this.httpMethod = HttpMethod.POST;
this.relativePath = swaggerMethod.getAnnotation(Post.class).value();
} else if (swaggerMethod.isAnnotationPresent(Patch.class)) {
this.httpMethod = HttpMethod.PATCH;
this.relativePath = swaggerMethod.getAnnotation(Patch.class).value();
} else {
throw new MissingRequiredAnnotationException(Arrays.asList(Get.class, Put.class, Head.class,
Delete.class, Post.class, Patch.class), swaggerMethod);
}
returnType = swaggerMethod.getGenericReturnType();
final ReturnValueWireType returnValueWireTypeAnnotation =
swaggerMethod.getAnnotation(ReturnValueWireType.class);
if (returnValueWireTypeAnnotation != null) {
Class<?> returnValueWireType = returnValueWireTypeAnnotation.value();
if (returnValueWireType == Base64Url.class
|| returnValueWireType == UnixTime.class
|| returnValueWireType == DateTimeRfc1123.class) {
this.returnValueWireType = returnValueWireType;
} else if (TypeUtil.isTypeOrSubTypeOf(returnValueWireType, List.class)) {
this.returnValueWireType = returnValueWireType.getGenericInterfaces()[0];
} else if (TypeUtil.isTypeOrSubTypeOf(returnValueWireType, Page.class)) {
this.returnValueWireType = returnValueWireType;
} else {
this.returnValueWireType = null;
}
} else {
this.returnValueWireType = null;
}
if (swaggerMethod.isAnnotationPresent(Headers.class)) {
final Headers headersAnnotation = swaggerMethod.getAnnotation(Headers.class);
final String[] headers = headersAnnotation.value();
for (final String header : headers) {
final int colonIndex = header.indexOf(":");
if (colonIndex >= 0) {
final String headerName = header.substring(0, colonIndex).trim();
if (!headerName.isEmpty()) {
final String headerValue = header.substring(colonIndex + 1).trim();
if (!headerValue.isEmpty()) {
if (headerValue.contains(",")) {
this.headers.set(headerName, Arrays.asList(headerValue.split(",")));
} else {
this.headers.set(headerName, headerValue);
}
}
}
}
}
}
final ExpectedResponses expectedResponses = swaggerMethod.getAnnotation(ExpectedResponses.class);
if (expectedResponses != null && expectedResponses.value().length > 0) {
expectedStatusCodes = new BitSet();
for (int code : expectedResponses.value()) {
expectedStatusCodes.set(code);
}
} else {
expectedStatusCodes = null;
}
unexpectedResponseExceptionTypes = swaggerMethod.getAnnotationsByType(UnexpectedResponseExceptionType.class);
Integer bodyContentMethodParameterIndex = null;
String bodyContentType = null;
Type bodyJavaType = null;
final Annotation[][] allParametersAnnotations = swaggerMethod.getParameterAnnotations();
for (int parameterIndex = 0; parameterIndex < allParametersAnnotations.length; ++parameterIndex) {
final Annotation[] parameterAnnotations = swaggerMethod.getParameterAnnotations()[parameterIndex];
for (final Annotation annotation : parameterAnnotations) {
final Class<? extends Annotation> annotationType = annotation.annotationType();
if (annotationType.equals(HostParam.class)) {
final HostParam hostParamAnnotation = (HostParam) annotation;
hostSubstitutions.add(new Substitution(hostParamAnnotation.value(), parameterIndex,
!hostParamAnnotation.encoded()));
} else if (annotationType.equals(PathParam.class)) {
final PathParam pathParamAnnotation = (PathParam) annotation;
pathSubstitutions.add(new Substitution(pathParamAnnotation.value(), parameterIndex,
!pathParamAnnotation.encoded()));
} else if (annotationType.equals(QueryParam.class)) {
final QueryParam queryParamAnnotation = (QueryParam) annotation;
querySubstitutions.add(new Substitution(queryParamAnnotation.value(), parameterIndex,
!queryParamAnnotation.encoded()));
} else if (annotationType.equals(HeaderParam.class)) {
final HeaderParam headerParamAnnotation = (HeaderParam) annotation;
headerSubstitutions.add(new Substitution(headerParamAnnotation.value(), parameterIndex, false));
} else if (annotationType.equals(BodyParam.class)) {
final BodyParam bodyParamAnnotation = (BodyParam) annotation;
bodyContentMethodParameterIndex = parameterIndex;
bodyContentType = bodyParamAnnotation.value();
bodyJavaType = swaggerMethod.getGenericParameterTypes()[parameterIndex];
} else if (annotationType.equals(FormParam.class)) {
final FormParam formParamAnnotation = (FormParam) annotation;
formSubstitutions.add(new Substitution(formParamAnnotation.value(), parameterIndex,
!formParamAnnotation.encoded()));
bodyContentType = ContentType.APPLICATION_X_WWW_FORM_URLENCODED;
bodyJavaType = String.class;
}
}
}
this.bodyContentMethodParameterIndex = bodyContentMethodParameterIndex;
this.bodyContentType = bodyContentType;
this.bodyJavaType = bodyJavaType;
}
/**
* Get the fully qualified method that was called to invoke this HTTP request.
*
* @return the fully qualified method that was called to invoke this HTTP request
*/
public String getFullyQualifiedMethodName() {
return fullyQualifiedMethodName;
}
/**
* Get the HTTP method that will be used to complete the Swagger method's request.
*
* @return the HTTP method that will be used to complete the Swagger method's request
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Sets the scheme and host to use for HTTP requests for this Swagger method.
*
* @param swaggerMethodArguments The arguments to use for scheme and host substitutions.
* @param urlBuilder The {@link UrlBuilder} that will have its scheme and host set.
*/
public void setSchemeAndHost(Object[] swaggerMethodArguments, UrlBuilder urlBuilder) {
final String substitutedHost = applySubstitutions(rawHost, hostSubstitutions, swaggerMethodArguments);
final String[] substitutedHostParts = PATTERN_COLON_SLASH_SLASH.split(substitutedHost);
if (substitutedHostParts.length >= 2) {
urlBuilder.setScheme(substitutedHostParts[0]);
urlBuilder.setHost(substitutedHostParts[1]);
} else if (substitutedHostParts.length == 1) {
urlBuilder.setScheme(substitutedHostParts[0]);
urlBuilder.setHost(substitutedHost);
} else {
urlBuilder.setHost(substitutedHost);
}
}
/**
* Get the path that will be used to complete the Swagger method's request.
*
* @param methodArguments the method arguments to use with the path substitutions
* @return the path value with its placeholders replaced by the matching substitutions
*/
public String setPath(Object[] methodArguments) {
return applySubstitutions(relativePath, pathSubstitutions, methodArguments);
}
/**
* Sets the encoded query parameters that have been added to this value based on the provided method arguments into
* the passed {@link UrlBuilder}.
*
* @param swaggerMethodArguments the arguments that will be used to create the query parameters' values
* @param urlBuilder The {@link UrlBuilder} where the encoded query parameters will be set.
*/
public void setEncodedQueryParameters(Object[] swaggerMethodArguments, UrlBuilder urlBuilder) {
if (swaggerMethodArguments == null) {
return;
}
for (Substitution substitution : querySubstitutions) {
final int parameterIndex = substitution.getMethodParameterIndex();
if (0 <= parameterIndex && parameterIndex < swaggerMethodArguments.length) {
final Object methodArgument = swaggerMethodArguments[substitution.getMethodParameterIndex()];
String parameterValue = serialize(serializer, methodArgument);
if (parameterValue != null) {
if (substitution.shouldEncode()) {
parameterValue = UrlEscapers.QUERY_ESCAPER.escape(parameterValue);
}
urlBuilder.setQueryParameter(substitution.getUrlParameterName(), parameterValue);
}
}
}
}
/**
* Sets the headers that have been added to this value based on the provided method arguments into the passed
* {@link HttpHeaders}.
*
* @param swaggerMethodArguments The arguments that will be used to create the headers' values.
* @param httpHeaders The {@link HttpHeaders} where the header values will be set.
*/
public void setHeaders(Object[] swaggerMethodArguments, HttpHeaders httpHeaders) {
for (HttpHeader header : headers) {
httpHeaders.set(header.getName(), header.getValuesList());
}
if (swaggerMethodArguments == null) {
return;
}
for (Substitution headerSubstitution : headerSubstitutions) {
final int parameterIndex = headerSubstitution.getMethodParameterIndex();
if (0 <= parameterIndex && parameterIndex < swaggerMethodArguments.length) {
final Object methodArgument = swaggerMethodArguments[headerSubstitution.getMethodParameterIndex()];
if (methodArgument instanceof Map) {
@SuppressWarnings("unchecked") final Map<String, ?> headerCollection =
(Map<String, ?>) methodArgument;
final String headerCollectionPrefix = headerSubstitution.getUrlParameterName();
for (final Map.Entry<String, ?> headerCollectionEntry : headerCollection.entrySet()) {
final String headerName = headerCollectionPrefix + headerCollectionEntry.getKey();
final String headerValue = serialize(serializer, headerCollectionEntry.getValue());
if (headerValue != null) {
httpHeaders.set(headerName, headerValue);
}
}
} else {
final String headerName = headerSubstitution.getUrlParameterName();
final String headerValue = serialize(serializer, methodArgument);
if (headerValue != null) {
httpHeaders.set(headerName, headerValue);
}
}
}
}
}
/**
* Get the {@link Context} passed into the proxy method.
*
* @param swaggerMethodArguments the arguments passed to the proxy method
     * @return the context, or {@link Context#NONE} if no context was provided
*/
public Context setContext(Object[] swaggerMethodArguments) {
Context context = CoreUtils.findFirstOfType(swaggerMethodArguments, Context.class);
return (context != null) ? context : Context.NONE;
}
/**
* Get the {@link RequestOptions} passed into the proxy method.
*
* @param swaggerMethodArguments the arguments passed to the proxy method
* @return the request options
*/
/**
* Get whether or not the provided response status code is one of the expected status codes for this Swagger
* method.
*
     * 1. If no expected status codes were declared, all status codes less than 400 are considered success codes.
     * 2. Otherwise, only the declared status codes are considered success codes.
*
* @param statusCode the status code that was returned in the HTTP response
* @return whether or not the provided response status code is one of the expected status codes for this Swagger
* method
*/
@Override
public boolean isExpectedResponseStatusCode(final int statusCode) {
return expectedStatusCodes == null
? statusCode < 400
: expectedStatusCodes.get(statusCode);
}
/**
* Get the {@link UnexpectedExceptionInformation} that will be used to generate a RestException if the HTTP response
* status code is not one of the expected status codes.
*
     * If an UnexpectedExceptionInformation is not found for the status code, the default
     * UnexpectedExceptionInformation will be returned.
*
* @param code Exception HTTP status code return from a REST API.
* @return the UnexpectedExceptionInformation to generate an exception to throw or return.
*/
@Override
public UnexpectedExceptionInformation getUnexpectedException(int code) {
if (exceptionMapping == null) {
exceptionMapping = processUnexpectedResponseExceptionTypes();
}
return exceptionMapping.getOrDefault(code, defaultException);
}
/**
* Get the object to be used as the value of the HTTP request.
*
* @param swaggerMethodArguments the method arguments to get the value object from
* @return the object that will be used as the body of the HTTP request
*/
public Object setBody(Object[] swaggerMethodArguments) {
Object result = null;
if (bodyContentMethodParameterIndex != null
&& swaggerMethodArguments != null
&& 0 <= bodyContentMethodParameterIndex
&& bodyContentMethodParameterIndex < swaggerMethodArguments.length) {
result = swaggerMethodArguments[bodyContentMethodParameterIndex];
}
if (!CoreUtils.isNullOrEmpty(formSubstitutions) && swaggerMethodArguments != null) {
result = formSubstitutions.stream()
.map(substitution -> serializeFormData(serializer, substitution.getUrlParameterName(),
swaggerMethodArguments[substitution.getMethodParameterIndex()], substitution.shouldEncode()))
.filter(Objects::nonNull)
.collect(Collectors.joining("&"));
}
return result;
}
/**
* Get the Content-Type of the body of this Swagger method.
*
* @return the Content-Type of the body of this Swagger method
*/
public String getBodyContentType() {
return bodyContentType;
}
/**
* Get the return type for the method that this object describes.
*
* @return the return type for the method that this object describes.
*/
@Override
public Type getReturnType() {
return returnType;
}
/**
* Get the type of the body parameter to this method, if present.
*
* @return the return type of the body parameter to this method
*/
public Type getBodyJavaType() {
return bodyJavaType;
}
/**
     * Get the type that the return value will be sent across the network as. If returnValueWireType is not null, then
     * the raw HTTP response body will need to be parsed to this type and then converted to the actual returnType.
*
* @return the type that the raw HTTP response body will be sent as
*/
@Override
public Type getReturnValueWireType() {
return returnValueWireType;
}
private static String serialize(SerializerAdapter serializer, Object value) {
if (value == null) {
return null;
}
if (value instanceof String) {
return (String) value;
} else {
return serializer.serializeRaw(value);
}
}
private static String serializeFormData(SerializerAdapter serializer, String key, Object value,
boolean shouldEncode) {
if (value == null) {
return null;
}
String encodedKey = UrlEscapers.FORM_ESCAPER.escape(key);
if (value instanceof List<?>) {
return ((List<?>) value).stream()
.map(element -> serializeAndEncodeFormValue(serializer, element, shouldEncode))
.filter(Objects::nonNull)
.map(formValue -> encodedKey + "=" + formValue)
.collect(Collectors.joining("&"));
} else {
return encodedKey + "=" + serializeAndEncodeFormValue(serializer, value, shouldEncode);
}
}
private static String serializeAndEncodeFormValue(SerializerAdapter serializer, Object value,
boolean shouldEncode) {
if (value == null) {
return null;
}
String serializedValue = serializer.serializeRaw(value);
return shouldEncode ? UrlEscapers.FORM_ESCAPER.escape(serializedValue) : serializedValue;
}
private String applySubstitutions(String originalValue, Iterable<Substitution> substitutions,
Object[] methodArguments) {
String result = originalValue;
if (methodArguments != null) {
for (Substitution substitution : substitutions) {
final int substitutionParameterIndex = substitution.getMethodParameterIndex();
if (0 <= substitutionParameterIndex && substitutionParameterIndex < methodArguments.length) {
final Object methodArgument = methodArguments[substitutionParameterIndex];
String substitutionValue = serialize(serializer, methodArgument);
if (substitutionValue != null && !substitutionValue.isEmpty() && substitution.shouldEncode()) {
substitutionValue = UrlEscapers.PATH_ESCAPER.escape(substitutionValue);
}
if (substitutionValue == null) {
substitutionValue = "";
}
result = result.replace("{" + substitution.getUrlParameterName() + "}", substitutionValue);
}
}
}
return result;
}
private Map<Integer, UnexpectedExceptionInformation> processUnexpectedResponseExceptionTypes() {
HashMap<Integer, UnexpectedExceptionInformation> exceptionHashMap = new HashMap<>();
for (UnexpectedResponseExceptionType exceptionAnnotation : unexpectedResponseExceptionTypes) {
UnexpectedExceptionInformation exception = new UnexpectedExceptionInformation(exceptionAnnotation.value());
if (exceptionAnnotation.code().length == 0) {
defaultException = exception;
} else {
for (int statusCode : exceptionAnnotation.code()) {
exceptionHashMap.put(statusCode, exception);
}
}
}
if (defaultException == null) {
defaultException = new UnexpectedExceptionInformation(HttpResponseException.class);
}
return exceptionHashMap;
}
}
|
class SwaggerMethodParser implements HttpResponseDecodeData {
    private static final Pattern PATTERN_COLON_SLASH_SLASH = Pattern.compile("://");
private final SerializerAdapter serializer;
private final String rawHost;
private final String fullyQualifiedMethodName;
private final HttpMethod httpMethod;
private final String relativePath;
private final List<Substitution> hostSubstitutions = new ArrayList<>();
private final List<Substitution> pathSubstitutions = new ArrayList<>();
private final List<Substitution> querySubstitutions = new ArrayList<>();
private final List<Substitution> formSubstitutions = new ArrayList<>();
private final List<Substitution> headerSubstitutions = new ArrayList<>();
private final HttpHeaders headers = new HttpHeaders();
private final Integer bodyContentMethodParameterIndex;
private final String bodyContentType;
private final Type bodyJavaType;
private final BitSet expectedStatusCodes;
private final Type returnType;
private final Type returnValueWireType;
private final UnexpectedResponseExceptionType[] unexpectedResponseExceptionTypes;
private Map<Integer, UnexpectedExceptionInformation> exceptionMapping;
private UnexpectedExceptionInformation defaultException;
/**
* Create a SwaggerMethodParser object using the provided fully qualified method name.
*
* @param swaggerMethod the Swagger method to parse.
* @param rawHost the raw host value from the @Host annotation. Before this can be used as the host value in an HTTP
* request, it must be processed through the possible host substitutions.
*/
SwaggerMethodParser(Method swaggerMethod, String rawHost) {
this(swaggerMethod, rawHost, JacksonAdapter.createDefaultSerializerAdapter());
}
SwaggerMethodParser(Method swaggerMethod, String rawHost, SerializerAdapter serializer) {
this.serializer = serializer;
this.rawHost = rawHost;
final Class<?> swaggerInterface = swaggerMethod.getDeclaringClass();
fullyQualifiedMethodName = swaggerInterface.getName() + "." + swaggerMethod.getName();
if (swaggerMethod.isAnnotationPresent(Get.class)) {
this.httpMethod = HttpMethod.GET;
this.relativePath = swaggerMethod.getAnnotation(Get.class).value();
} else if (swaggerMethod.isAnnotationPresent(Put.class)) {
this.httpMethod = HttpMethod.PUT;
this.relativePath = swaggerMethod.getAnnotation(Put.class).value();
} else if (swaggerMethod.isAnnotationPresent(Head.class)) {
this.httpMethod = HttpMethod.HEAD;
this.relativePath = swaggerMethod.getAnnotation(Head.class).value();
} else if (swaggerMethod.isAnnotationPresent(Delete.class)) {
this.httpMethod = HttpMethod.DELETE;
this.relativePath = swaggerMethod.getAnnotation(Delete.class).value();
} else if (swaggerMethod.isAnnotationPresent(Post.class)) {
this.httpMethod = HttpMethod.POST;
this.relativePath = swaggerMethod.getAnnotation(Post.class).value();
} else if (swaggerMethod.isAnnotationPresent(Patch.class)) {
this.httpMethod = HttpMethod.PATCH;
this.relativePath = swaggerMethod.getAnnotation(Patch.class).value();
} else {
throw new MissingRequiredAnnotationException(Arrays.asList(Get.class, Put.class, Head.class,
Delete.class, Post.class, Patch.class), swaggerMethod);
}
returnType = swaggerMethod.getGenericReturnType();
final ReturnValueWireType returnValueWireTypeAnnotation =
swaggerMethod.getAnnotation(ReturnValueWireType.class);
if (returnValueWireTypeAnnotation != null) {
Class<?> returnValueWireType = returnValueWireTypeAnnotation.value();
if (returnValueWireType == Base64Url.class
|| returnValueWireType == UnixTime.class
|| returnValueWireType == DateTimeRfc1123.class) {
this.returnValueWireType = returnValueWireType;
} else if (TypeUtil.isTypeOrSubTypeOf(returnValueWireType, List.class)) {
this.returnValueWireType = returnValueWireType.getGenericInterfaces()[0];
} else if (TypeUtil.isTypeOrSubTypeOf(returnValueWireType, Page.class)) {
this.returnValueWireType = returnValueWireType;
} else {
this.returnValueWireType = null;
}
} else {
this.returnValueWireType = null;
}
if (swaggerMethod.isAnnotationPresent(Headers.class)) {
final Headers headersAnnotation = swaggerMethod.getAnnotation(Headers.class);
final String[] headers = headersAnnotation.value();
for (final String header : headers) {
final int colonIndex = header.indexOf(":");
if (colonIndex >= 0) {
final String headerName = header.substring(0, colonIndex).trim();
if (!headerName.isEmpty()) {
final String headerValue = header.substring(colonIndex + 1).trim();
if (!headerValue.isEmpty()) {
if (headerValue.contains(",")) {
this.headers.set(headerName, Arrays.asList(headerValue.split(",")));
} else {
this.headers.set(headerName, headerValue);
}
}
}
}
}
}
final ExpectedResponses expectedResponses = swaggerMethod.getAnnotation(ExpectedResponses.class);
if (expectedResponses != null && expectedResponses.value().length > 0) {
expectedStatusCodes = new BitSet();
for (int code : expectedResponses.value()) {
expectedStatusCodes.set(code);
}
} else {
expectedStatusCodes = null;
}
unexpectedResponseExceptionTypes = swaggerMethod.getAnnotationsByType(UnexpectedResponseExceptionType.class);
Integer bodyContentMethodParameterIndex = null;
String bodyContentType = null;
Type bodyJavaType = null;
final Annotation[][] allParametersAnnotations = swaggerMethod.getParameterAnnotations();
for (int parameterIndex = 0; parameterIndex < allParametersAnnotations.length; ++parameterIndex) {
final Annotation[] parameterAnnotations = swaggerMethod.getParameterAnnotations()[parameterIndex];
for (final Annotation annotation : parameterAnnotations) {
final Class<? extends Annotation> annotationType = annotation.annotationType();
if (annotationType.equals(HostParam.class)) {
final HostParam hostParamAnnotation = (HostParam) annotation;
hostSubstitutions.add(new Substitution(hostParamAnnotation.value(), parameterIndex,
!hostParamAnnotation.encoded()));
} else if (annotationType.equals(PathParam.class)) {
final PathParam pathParamAnnotation = (PathParam) annotation;
pathSubstitutions.add(new Substitution(pathParamAnnotation.value(), parameterIndex,
!pathParamAnnotation.encoded()));
} else if (annotationType.equals(QueryParam.class)) {
final QueryParam queryParamAnnotation = (QueryParam) annotation;
querySubstitutions.add(new Substitution(queryParamAnnotation.value(), parameterIndex,
!queryParamAnnotation.encoded()));
} else if (annotationType.equals(HeaderParam.class)) {
final HeaderParam headerParamAnnotation = (HeaderParam) annotation;
headerSubstitutions.add(new Substitution(headerParamAnnotation.value(), parameterIndex, false));
} else if (annotationType.equals(BodyParam.class)) {
final BodyParam bodyParamAnnotation = (BodyParam) annotation;
bodyContentMethodParameterIndex = parameterIndex;
bodyContentType = bodyParamAnnotation.value();
bodyJavaType = swaggerMethod.getGenericParameterTypes()[parameterIndex];
} else if (annotationType.equals(FormParam.class)) {
final FormParam formParamAnnotation = (FormParam) annotation;
formSubstitutions.add(new Substitution(formParamAnnotation.value(), parameterIndex,
!formParamAnnotation.encoded()));
bodyContentType = ContentType.APPLICATION_X_WWW_FORM_URLENCODED;
bodyJavaType = String.class;
}
}
}
this.bodyContentMethodParameterIndex = bodyContentMethodParameterIndex;
this.bodyContentType = bodyContentType;
this.bodyJavaType = bodyJavaType;
}
/**
* Get the fully qualified method that was called to invoke this HTTP request.
*
* @return the fully qualified method that was called to invoke this HTTP request
*/
public String getFullyQualifiedMethodName() {
return fullyQualifiedMethodName;
}
/**
* Get the HTTP method that will be used to complete the Swagger method's request.
*
* @return the HTTP method that will be used to complete the Swagger method's request
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Sets the scheme and host to use for HTTP requests for this Swagger method.
*
* @param swaggerMethodArguments The arguments to use for scheme and host substitutions.
* @param urlBuilder The {@link UrlBuilder} that will have its scheme and host set.
*/
public void setSchemeAndHost(Object[] swaggerMethodArguments, UrlBuilder urlBuilder) {
final String substitutedHost = applySubstitutions(rawHost, hostSubstitutions, swaggerMethodArguments);
final String[] substitutedHostParts = PATTERN_COLON_SLASH_SLASH.split(substitutedHost);
if (substitutedHostParts.length >= 2) {
urlBuilder.setScheme(substitutedHostParts[0]);
urlBuilder.setHost(substitutedHostParts[1]);
} else if (substitutedHostParts.length == 1) {
urlBuilder.setScheme(substitutedHostParts[0]);
urlBuilder.setHost(substitutedHost);
} else {
urlBuilder.setHost(substitutedHost);
}
}
/**
* Get the path that will be used to complete the Swagger method's request.
*
* @param methodArguments the method arguments to use with the path substitutions
* @return the path value with its placeholders replaced by the matching substitutions
*/
public String setPath(Object[] methodArguments) {
return applySubstitutions(relativePath, pathSubstitutions, methodArguments);
}
/**
* Sets the encoded query parameters that have been added to this value based on the provided method arguments into
* the passed {@link UrlBuilder}.
*
* @param swaggerMethodArguments the arguments that will be used to create the query parameters' values
* @param urlBuilder The {@link UrlBuilder} where the encoded query parameters will be set.
*/
public void setEncodedQueryParameters(Object[] swaggerMethodArguments, UrlBuilder urlBuilder) {
if (swaggerMethodArguments == null) {
return;
}
for (Substitution substitution : querySubstitutions) {
final int parameterIndex = substitution.getMethodParameterIndex();
if (0 <= parameterIndex && parameterIndex < swaggerMethodArguments.length) {
final Object methodArgument = swaggerMethodArguments[substitution.getMethodParameterIndex()];
String parameterValue = serialize(serializer, methodArgument);
if (parameterValue != null) {
if (substitution.shouldEncode()) {
parameterValue = UrlEscapers.QUERY_ESCAPER.escape(parameterValue);
}
urlBuilder.setQueryParameter(substitution.getUrlParameterName(), parameterValue);
}
}
}
}
/**
* Sets the headers that have been added to this value based on the provided method arguments into the passed
* {@link HttpHeaders}.
*
* @param swaggerMethodArguments The arguments that will be used to create the headers' values.
* @param httpHeaders The {@link HttpHeaders} where the header values will be set.
*/
public void setHeaders(Object[] swaggerMethodArguments, HttpHeaders httpHeaders) {
for (HttpHeader header : headers) {
httpHeaders.set(header.getName(), header.getValuesList());
}
if (swaggerMethodArguments == null) {
return;
}
for (Substitution headerSubstitution : headerSubstitutions) {
final int parameterIndex = headerSubstitution.getMethodParameterIndex();
if (0 <= parameterIndex && parameterIndex < swaggerMethodArguments.length) {
final Object methodArgument = swaggerMethodArguments[headerSubstitution.getMethodParameterIndex()];
if (methodArgument instanceof Map) {
@SuppressWarnings("unchecked") final Map<String, ?> headerCollection =
(Map<String, ?>) methodArgument;
final String headerCollectionPrefix = headerSubstitution.getUrlParameterName();
for (final Map.Entry<String, ?> headerCollectionEntry : headerCollection.entrySet()) {
final String headerName = headerCollectionPrefix + headerCollectionEntry.getKey();
final String headerValue = serialize(serializer, headerCollectionEntry.getValue());
if (headerValue != null) {
httpHeaders.set(headerName, headerValue);
}
}
} else {
final String headerName = headerSubstitution.getUrlParameterName();
final String headerValue = serialize(serializer, methodArgument);
if (headerValue != null) {
httpHeaders.set(headerName, headerValue);
}
}
}
}
}
/**
* Get the {@link Context} passed into the proxy method.
*
* @param swaggerMethodArguments the arguments passed to the proxy method
     * @return the context, or {@link Context#NONE} if no context was provided
*/
public Context setContext(Object[] swaggerMethodArguments) {
Context context = CoreUtils.findFirstOfType(swaggerMethodArguments, Context.class);
return (context != null) ? context : Context.NONE;
}
/**
* Get the {@link RequestOptions} passed into the proxy method.
*
* @param swaggerMethodArguments the arguments passed to the proxy method
* @return the request options
*/
/**
* Get whether or not the provided response status code is one of the expected status codes for this Swagger
* method.
*
     * 1. If no expected status codes were declared, all status codes less than 400 are considered success codes.
     * 2. Otherwise, only the declared status codes are considered success codes.
*
* @param statusCode the status code that was returned in the HTTP response
* @return whether or not the provided response status code is one of the expected status codes for this Swagger
* method
*/
@Override
public boolean isExpectedResponseStatusCode(final int statusCode) {
return expectedStatusCodes == null
? statusCode < 400
: expectedStatusCodes.get(statusCode);
}
/**
* Get the {@link UnexpectedExceptionInformation} that will be used to generate a RestException if the HTTP response
* status code is not one of the expected status codes.
*
     * If an UnexpectedExceptionInformation is not found for the status code, the default
     * UnexpectedExceptionInformation will be returned.
*
* @param code Exception HTTP status code return from a REST API.
* @return the UnexpectedExceptionInformation to generate an exception to throw or return.
*/
@Override
public UnexpectedExceptionInformation getUnexpectedException(int code) {
if (exceptionMapping == null) {
exceptionMapping = processUnexpectedResponseExceptionTypes();
}
return exceptionMapping.getOrDefault(code, defaultException);
}
/**
* Get the object to be used as the value of the HTTP request.
*
* @param swaggerMethodArguments the method arguments to get the value object from
* @return the object that will be used as the body of the HTTP request
*/
public Object setBody(Object[] swaggerMethodArguments) {
Object result = null;
if (bodyContentMethodParameterIndex != null
&& swaggerMethodArguments != null
&& 0 <= bodyContentMethodParameterIndex
&& bodyContentMethodParameterIndex < swaggerMethodArguments.length) {
result = swaggerMethodArguments[bodyContentMethodParameterIndex];
}
if (!CoreUtils.isNullOrEmpty(formSubstitutions) && swaggerMethodArguments != null) {
result = formSubstitutions.stream()
.map(substitution -> serializeFormData(serializer, substitution.getUrlParameterName(),
swaggerMethodArguments[substitution.getMethodParameterIndex()], substitution.shouldEncode()))
.filter(Objects::nonNull)
.collect(Collectors.joining("&"));
}
return result;
}
/**
* Get the Content-Type of the body of this Swagger method.
*
* @return the Content-Type of the body of this Swagger method
*/
public String getBodyContentType() {
return bodyContentType;
}
/**
* Get the return type for the method that this object describes.
*
* @return the return type for the method that this object describes.
*/
@Override
public Type getReturnType() {
return returnType;
}
/**
* Get the type of the body parameter to this method, if present.
*
* @return the return type of the body parameter to this method
*/
public Type getBodyJavaType() {
return bodyJavaType;
}
/**
     * Get the type that the return value will be sent across the network as. If returnValueWireType is not null, then
     * the raw HTTP response body will need to be parsed to this type and then converted to the actual returnType.
*
* @return the type that the raw HTTP response body will be sent as
*/
@Override
public Type getReturnValueWireType() {
return returnValueWireType;
}
private static String serialize(SerializerAdapter serializer, Object value) {
if (value == null) {
return null;
}
if (value instanceof String) {
return (String) value;
} else {
return serializer.serializeRaw(value);
}
}
private static String serializeFormData(SerializerAdapter serializer, String key, Object value,
boolean shouldEncode) {
if (value == null) {
return null;
}
String encodedKey = UrlEscapers.FORM_ESCAPER.escape(key);
if (value instanceof List<?>) {
return ((List<?>) value).stream()
.map(element -> serializeAndEncodeFormValue(serializer, element, shouldEncode))
.filter(Objects::nonNull)
.map(formValue -> encodedKey + "=" + formValue)
.collect(Collectors.joining("&"));
} else {
return encodedKey + "=" + serializeAndEncodeFormValue(serializer, value, shouldEncode);
}
}
private static String serializeAndEncodeFormValue(SerializerAdapter serializer, Object value,
boolean shouldEncode) {
if (value == null) {
return null;
}
String serializedValue = serializer.serializeRaw(value);
return shouldEncode ? UrlEscapers.FORM_ESCAPER.escape(serializedValue) : serializedValue;
}
private String applySubstitutions(String originalValue, Iterable<Substitution> substitutions,
Object[] methodArguments) {
String result = originalValue;
if (methodArguments != null) {
for (Substitution substitution : substitutions) {
final int substitutionParameterIndex = substitution.getMethodParameterIndex();
if (0 <= substitutionParameterIndex && substitutionParameterIndex < methodArguments.length) {
final Object methodArgument = methodArguments[substitutionParameterIndex];
String substitutionValue = serialize(serializer, methodArgument);
if (substitutionValue != null && !substitutionValue.isEmpty() && substitution.shouldEncode()) {
substitutionValue = UrlEscapers.PATH_ESCAPER.escape(substitutionValue);
}
if (substitutionValue == null) {
substitutionValue = "";
}
result = result.replace("{" + substitution.getUrlParameterName() + "}", substitutionValue);
}
}
}
return result;
}
private Map<Integer, UnexpectedExceptionInformation> processUnexpectedResponseExceptionTypes() {
HashMap<Integer, UnexpectedExceptionInformation> exceptionHashMap = new HashMap<>();
for (UnexpectedResponseExceptionType exceptionAnnotation : unexpectedResponseExceptionTypes) {
UnexpectedExceptionInformation exception = new UnexpectedExceptionInformation(exceptionAnnotation.value());
if (exceptionAnnotation.code().length == 0) {
defaultException = exception;
} else {
for (int statusCode : exceptionAnnotation.code()) {
exceptionHashMap.put(statusCode, exception);
}
}
}
if (defaultException == null) {
defaultException = new UnexpectedExceptionInformation(HttpResponseException.class);
}
return exceptionHashMap;
}
}
|
Why do we need to update the limit in the *last* segment for each write?
|
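A hedged note on why the limit must be refreshed: each open BufferIterator wraps a RandomAccessInputView created with the number of readable bytes in the last memory segment at that moment, while write keeps appending into that same last segment. Without updateLimitInLastSegment, an already-open iterator would treat its stale snapshot as end-of-data and miss rows written after it was created. The toy model below illustrates the mechanism; GrowingBuffer and its names are hypothetical, not the Flink classes.

import java.util.ArrayList;
import java.util.List;

// Toy model (hypothetical names) of the stale-limit problem: an iterator
// snapshots how many bytes of the final segment are readable when it is
// created, but write(..) keeps appending into that same final segment.
final class GrowingBuffer {
    private final List<byte[]> segments = new ArrayList<>();
    private final List<Iterator> openIterators = new ArrayList<>();
    private int bytesInLastSegment;

    void write(byte b) {
        if (segments.isEmpty() || bytesInLastSegment == segments.get(segments.size() - 1).length) {
            segments.add(new byte[4]); // tiny segment size, just for the demo
            bytesInLastSegment = 0;
        }
        segments.get(segments.size() - 1)[bytesInLastSegment++] = b;
        // Mirrors the loop in the reviewed write(): live iterators must learn
        // the new readable limit of the last segment, or they read against a
        // stale boundary.
        for (Iterator it : openIterators) {
            it.updateLimitInLastSegment(bytesInLastSegment);
        }
    }

    Iterator open() {
        Iterator it = new Iterator();
        openIterators.add(it);
        return it;
    }

    final class Iterator {
        private int segment;
        private int offset;
        private int limitInLastSegment = bytesInLastSegment; // snapshot at creation

        void updateLimitInLastSegment(int newLimit) {
            this.limitInLastSegment = newLimit;
        }

        boolean hasNext() {
            if (segments.isEmpty()) {
                return false;
            }
            // Segments before the last one are full; only the last segment's
            // readable range moves as writes come in.
            return segment < segments.size() - 1 || offset < limitInLastSegment;
        }

        byte next() {
            if (offset == segments.get(segment).length) {
                segment++;
                offset = 0;
            }
            return segments.get(segment)[offset++];
        }

        void close() {
            openIterators.remove(this);
        }
    }
}

In this model, calling updateLimitInLastSegment from the write path, exactly like the loop in the reviewed write method, is what lets iterators opened earlier see bytes appended later; segments that are no longer last are full, so only the last one needs a moving limit.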
public boolean write(BaseRow row) throws IOException {
try {
this.serializer.serializeToPages(row, this.recordCollector);
currentDataBufferOffset = this.recordCollector.getCurrentOffset();
numBytesInLastBuffer = this.recordCollector.getCurrentPositionInSegment();
recordCount++;
for (BufferIterator iterator : iterators) {
iterator.recordBuffer.updateLimitInLastSegment(numBytesInLastBuffer);
}
return true;
} catch (EOFException e) {
return false;
}
}
|
iterator.recordBuffer.updateLimitInLastSegment(numBytesInLastBuffer);
|
public boolean write(BaseRow row) throws IOException {
try {
this.serializer.serializeToPages(row, this.recordCollector);
currentDataBufferOffset = this.recordCollector.getCurrentOffset();
numBytesInLastBuffer = this.recordCollector.getCurrentPositionInSegment();
recordCount++;
return true;
} catch (EOFException e) {
return false;
}
}
|
class InMemoryBuffer implements Closeable {
private final int segmentSize;
private final ArrayList<MemorySegment> freeMemory;
private final AbstractRowSerializer serializer;
private final ArrayList<MemorySegment> recordBufferSegments;
private final SimpleCollectingOutputView recordCollector;
private long currentDataBufferOffset;
private int numBytesInLastBuffer;
private int recordCount;
private LinkedList<BufferIterator> iterators;
private InMemoryBuffer(AbstractRowSerializer serializer) {
this.segmentSize = memory.get(0).size();
this.freeMemory = new ArrayList<>(memory);
this.serializer = (AbstractRowSerializer) serializer.duplicate();
this.recordBufferSegments = new ArrayList<>(memory.size());
this.recordCollector = new SimpleCollectingOutputView(this.recordBufferSegments,
new ListMemorySegmentSource(this.freeMemory), this.segmentSize);
this.recordCount = 0;
this.iterators = new LinkedList<>();
}
private void reset() {
this.currentDataBufferOffset = 0;
this.recordCount = 0;
this.freeMemory.addAll(this.recordBufferSegments);
this.recordBufferSegments.clear();
this.recordCollector.reset();
iterators.clear();
}
@Override
public void close() {
this.freeMemory.clear();
this.recordBufferSegments.clear();
iterators.clear();
}
private ArrayList<MemorySegment> getRecordBufferSegments() {
return recordBufferSegments;
}
private long getCurrentDataBufferOffset() {
return currentDataBufferOffset;
}
private int getNumRecordBuffers() {
int result = (int) (currentDataBufferOffset / segmentSize);
long mod = currentDataBufferOffset % segmentSize;
if (mod != 0) {
result += 1;
}
return result;
}
private int getNumBytesInLastBuffer() {
return numBytesInLastBuffer;
}
private BufferIterator newIterator(int beginRow, long offset) {
checkArgument(offset >= 0, "`offset` can't be negative!");
RandomAccessInputView recordBuffer = new RandomAccessInputView(
this.recordBufferSegments, this.segmentSize, numBytesInLastBuffer);
BufferIterator iterator = new BufferIterator(beginRow, offset, recordBuffer);
iterators.add(iterator);
return iterator;
}
/**
* Iterator of in memory buffer.
*/
public class BufferIterator implements MutableObjectIterator<BinaryRow>, Closeable {
private int beginRow;
private int nextRow;
private RandomAccessInputView recordBuffer;
private BufferIterator(int beginRow, long offset, RandomAccessInputView recordBuffer) {
this.beginRow = beginRow;
this.recordBuffer = recordBuffer;
reset(offset);
}
public void reset(long offset) {
this.nextRow = beginRow;
recordBuffer.setReadPosition(offset);
}
@Override
public BinaryRow next(BinaryRow reuse) throws IOException {
try {
if (nextRow >= recordCount) {
return null;
}
nextRow++;
return (BinaryRow) serializer.mapFromPages(reuse, recordBuffer);
} catch (EOFException e) {
return null;
}
}
@Override
public BinaryRow next() throws IOException {
throw new RuntimeException("Not support!");
}
@Override
public void close() {
iterators.remove(this);
}
}
}
|
class InMemoryBuffer implements Closeable {
private final int segmentSize;
private final ArrayList<MemorySegment> freeMemory;
private final AbstractRowSerializer serializer;
private final ArrayList<MemorySegment> recordBufferSegments;
private final SimpleCollectingOutputView recordCollector;
private long currentDataBufferOffset;
private int numBytesInLastBuffer;
private int recordCount;
private InMemoryBuffer(AbstractRowSerializer serializer) {
this.segmentSize = memory.get(0).size();
this.freeMemory = new ArrayList<>(memory);
this.serializer = (AbstractRowSerializer) serializer.duplicate();
this.recordBufferSegments = new ArrayList<>(memory.size());
this.recordCollector = new SimpleCollectingOutputView(this.recordBufferSegments,
new ListMemorySegmentSource(this.freeMemory), this.segmentSize);
this.recordCount = 0;
}
private void reset() {
this.currentDataBufferOffset = 0;
this.recordCount = 0;
this.freeMemory.addAll(this.recordBufferSegments);
this.recordBufferSegments.clear();
this.recordCollector.reset();
}
@Override
public void close() {
this.freeMemory.clear();
this.recordBufferSegments.clear();
}
private ArrayList<MemorySegment> getRecordBufferSegments() {
return recordBufferSegments;
}
private long getCurrentDataBufferOffset() {
return currentDataBufferOffset;
}
private int getNumRecordBuffers() {
int result = (int) (currentDataBufferOffset / segmentSize);
long mod = currentDataBufferOffset % segmentSize;
if (mod != 0) {
result += 1;
}
return result;
}
private int getNumBytesInLastBuffer() {
return numBytesInLastBuffer;
}
private InMemoryBufferIterator newIterator(int beginRow, long offset) {
checkArgument(offset >= 0, "`offset` can't be negative!");
RandomAccessInputView recordBuffer = new RandomAccessInputView(
this.recordBufferSegments, this.segmentSize, numBytesInLastBuffer);
return new InMemoryBufferIterator(recordCount, beginRow, offset, recordBuffer);
}
/**
* Iterator of in memory buffer.
*/
public class InMemoryBufferIterator implements MutableObjectIterator<BinaryRow>, Closeable {
private final int beginRow;
private int nextRow;
private RandomAccessInputView recordBuffer;
private int expectedRecordCount;
private InMemoryBufferIterator(int expectedRecordCount, int beginRow, long offset, RandomAccessInputView recordBuffer) {
this.beginRow = beginRow;
this.recordBuffer = recordBuffer;
reset(expectedRecordCount, offset);
}
public void reset(int expectedRecordCount, long offset) {
this.nextRow = beginRow;
this.expectedRecordCount = expectedRecordCount;
recordBuffer.setReadPosition(offset);
}
@Override
public BinaryRow next(BinaryRow reuse) throws IOException {
try {
if (expectedRecordCount != recordCount) {
throw new ConcurrentModificationException();
}
if (nextRow >= recordCount) {
return null;
}
nextRow++;
return (BinaryRow) serializer.mapFromPages(reuse, recordBuffer);
} catch (EOFException e) {
return null;
}
}
@Override
public BinaryRow next() throws IOException {
throw new RuntimeException("Not support!");
}
@Override
public void close() {
}
}
}
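The revised buffer drops the shared iterator list and instead snapshots `recordCount` when an iterator is created, failing fast if the buffer was written to afterwards. A minimal sketch of that fail-fast pattern, assuming a simplified buffer type:

```java
import java.util.ConcurrentModificationException;

public class FailFastIteratorSketch {
    private int recordCount; // mutated by writes

    class Iter {
        private final int expectedRecordCount = recordCount; // snapshot at creation

        Object next() {
            if (expectedRecordCount != recordCount) {
                throw new ConcurrentModificationException();
            }
            return null; // read the next record here
        }
    }

    public static void main(String[] args) {
        FailFastIteratorSketch buffer = new FailFastIteratorSketch();
        FailFastIteratorSketch.Iter it = buffer.new Iter();
        buffer.recordCount++; // simulate a write after the iterator was created
        try {
            it.next();
        } catch (ConcurrentModificationException e) {
            System.out.println("stale iterator detected");
        }
    }
}
```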
|
```suggestion
throw createLauncherException("unable to resolve the target path:" + e.getMessage());
```
|
public void execute() {
long start = 0;
if (this.helpFlag) {
String commandUsageInfo = BLauncherCmd.getCommandUsageInfo(RUN_COMMAND);
this.errStream.println(commandUsageInfo);
return;
}
if (this.debugPort != null) {
System.setProperty(SYSTEM_PROP_BAL_DEBUG, this.debugPort);
}
String[] args = new String[0];
if (!argList.isEmpty()) {
if (!argList.get(0).equals("--")) {
this.projectPath = Paths.get(argList.get(0));
if (RunCommand.JAR_EXTENSION_MATCHER.matches(this.projectPath)) {
CommandUtil.printError(this.errStream, "unsupported option(s) provided for jar execution",
runCmd, true);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
if (argList.size() > 1 && !argList.get(1).equals("--")) {
CommandUtil.printError(this.errStream,
"unmatched command argument found: " + argList.get(1), runCmd, false);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
if (argList.size() > 2 && argList.get(1).equals("--")) {
args = argList.subList(2, argList.size()).toArray(new String[0]);
}
} else {
if (argList.size() > 1 && argList.get(0).equals("--")) {
args = argList.subList(1, argList.size()).toArray(new String[0]);
}
}
}
if (sticky == null) {
sticky = false;
}
Project project;
BuildOptions buildOptions = constructBuildOptions();
boolean isSingleFileBuild = false;
if (FileUtils.hasExtension(this.projectPath)) {
try {
if (buildOptions.dumpBuildTime()) {
start = System.currentTimeMillis();
BuildTime.getInstance().timestamp = start;
}
project = SingleFileProject.load(this.projectPath, buildOptions);
if (buildOptions.dumpBuildTime()) {
BuildTime.getInstance().projectLoadDuration = System.currentTimeMillis() - start;
}
} catch (ProjectException e) {
CommandUtil.printError(this.errStream, e.getMessage(), runCmd, false);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
isSingleFileBuild = true;
} else {
try {
if (buildOptions.dumpBuildTime()) {
start = System.currentTimeMillis();
BuildTime.getInstance().timestamp = start;
}
project = BuildProject.load(this.projectPath, buildOptions);
if (buildOptions.dumpBuildTime()) {
BuildTime.getInstance().projectLoadDuration = System.currentTimeMillis() - start;
}
} catch (ProjectException e) {
CommandUtil.printError(this.errStream, e.getMessage(), runCmd, false);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
}
if (ProjectUtils.isProjectEmpty(project)) {
CommandUtil.printError(this.errStream, "package is empty. Please add at least one .bal file.", null, false);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
Target target;
try {
if (project.kind().equals(ProjectKind.BUILD_PROJECT)) {
target = new Target(project.targetDir());
} else {
target = new Target(Files.createTempDirectory("ballerina-cache" + System.nanoTime()));
target.setOutputPath(target.getBinPath());
}
} catch (IOException e) {
throw createLauncherException("unable to resolve target path:" + e.getMessage());
} catch (ProjectException e) {
throw createLauncherException("unable to create executable:" + e.getMessage());
}
boolean isPackageModified = isProjectUpdated(project);
TaskExecutor taskExecutor = new TaskExecutor.TaskBuilder()
.addTask(new CleanTargetDirTask(isPackageModified, buildOptions.enableCache()), isSingleFileBuild)
.addTask(new RunBuildToolsTask(outStream), isSingleFileBuild)
.addTask(new ResolveMavenDependenciesTask(outStream))
.addTask(new CompileTask(outStream, errStream, false, false,
isPackageModified, buildOptions.enableCache()))
.addTask(new CreateExecutableTask(outStream, null, target, true))
.addTask(new RunExecutableTask(args, outStream, errStream, target))
.addTask(new DumpBuildTimeTask(outStream), !project.buildOptions().dumpBuildTime())
.build();
taskExecutor.executeTasks(project);
}
|
throw createLauncherException("unable to resolve target path:" + e.getMessage());
|
public void execute() {
long start = 0;
if (this.helpFlag) {
String commandUsageInfo = BLauncherCmd.getCommandUsageInfo(RUN_COMMAND);
this.errStream.println(commandUsageInfo);
return;
}
if (this.debugPort != null) {
System.setProperty(SYSTEM_PROP_BAL_DEBUG, this.debugPort);
}
String[] args = new String[0];
if (!argList.isEmpty()) {
if (!argList.get(0).equals("--")) {
this.projectPath = Paths.get(argList.get(0));
if (RunCommand.JAR_EXTENSION_MATCHER.matches(this.projectPath)) {
CommandUtil.printError(this.errStream, "unsupported option(s) provided for jar execution",
runCmd, true);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
if (argList.size() > 1 && !argList.get(1).equals("--")) {
CommandUtil.printError(this.errStream,
"unmatched command argument found: " + argList.get(1), runCmd, false);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
if (argList.size() > 2 && argList.get(1).equals("--")) {
args = argList.subList(2, argList.size()).toArray(new String[0]);
}
} else {
if (argList.size() > 1 && argList.get(0).equals("--")) {
args = argList.subList(1, argList.size()).toArray(new String[0]);
}
}
}
if (sticky == null) {
sticky = false;
}
Project project;
BuildOptions buildOptions = constructBuildOptions();
boolean isSingleFileBuild = false;
if (FileUtils.hasExtension(this.projectPath)) {
try {
if (buildOptions.dumpBuildTime()) {
start = System.currentTimeMillis();
BuildTime.getInstance().timestamp = start;
}
project = SingleFileProject.load(this.projectPath, buildOptions);
if (buildOptions.dumpBuildTime()) {
BuildTime.getInstance().projectLoadDuration = System.currentTimeMillis() - start;
}
} catch (ProjectException e) {
CommandUtil.printError(this.errStream, e.getMessage(), runCmd, false);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
isSingleFileBuild = true;
} else {
try {
if (buildOptions.dumpBuildTime()) {
start = System.currentTimeMillis();
BuildTime.getInstance().timestamp = start;
}
project = BuildProject.load(this.projectPath, buildOptions);
if (buildOptions.dumpBuildTime()) {
BuildTime.getInstance().projectLoadDuration = System.currentTimeMillis() - start;
}
} catch (ProjectException e) {
CommandUtil.printError(this.errStream, e.getMessage(), runCmd, false);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
}
if (ProjectUtils.isProjectEmpty(project)) {
CommandUtil.printError(this.errStream, "package is empty. Please add at least one .bal file.", null, false);
CommandUtil.exitError(this.exitWhenFinish);
return;
}
Target target;
try {
if (project.kind().equals(ProjectKind.BUILD_PROJECT)) {
target = new Target(project.targetDir());
} else {
target = new Target(Files.createTempDirectory("ballerina-cache" + System.nanoTime()));
target.setOutputPath(target.getBinPath());
}
} catch (IOException e) {
throw createLauncherException("unable to resolve the target path:" + e.getMessage());
} catch (ProjectException e) {
throw createLauncherException("unable to create the executable:" + e.getMessage());
}
boolean isPackageModified = isProjectUpdated(project);
TaskExecutor taskExecutor = new TaskExecutor.TaskBuilder()
.addTask(new CleanTargetDirTask(isPackageModified, buildOptions.enableCache()), isSingleFileBuild)
.addTask(new RunBuildToolsTask(outStream), isSingleFileBuild)
.addTask(new ResolveMavenDependenciesTask(outStream))
.addTask(new CompileTask(outStream, errStream, false, false,
isPackageModified, buildOptions.enableCache()))
.addTask(new CreateExecutableTask(outStream, null, target, true))
.addTask(new RunExecutableTask(args, outStream, errStream, target))
.addTask(new DumpBuildTimeTask(outStream), !project.buildOptions().dumpBuildTime())
.build();
taskExecutor.executeTasks(project);
}
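The task pipeline above uses a builder whose two-argument `addTask` treats the boolean as a skip flag. A minimal sketch of that idiom with hypothetical `Task` and `Builder` types (not the Ballerina CLI's actual API):

```java
import java.util.ArrayList;
import java.util.List;

public class ConditionalTaskBuilderSketch {
    interface Task { void run(); }

    static class Builder {
        private final List<Task> tasks = new ArrayList<>();

        Builder addTask(Task task) { tasks.add(task); return this; }

        // Second argument mirrors the pattern above: true means "skip this task".
        Builder addTask(Task task, boolean skip) {
            if (!skip) tasks.add(task);
            return this;
        }

        void executeAll() { tasks.forEach(Task::run); }
    }

    public static void main(String[] args) {
        boolean isSingleFileBuild = true;
        new Builder()
                .addTask(() -> System.out.println("compile"))
                .addTask(() -> System.out.println("clean target dir"), isSingleFileBuild) // skipped
                .executeAll();
    }
}
```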
|
class RunCommand implements BLauncherCmd {
private final PrintStream outStream;
private final PrintStream errStream;
private Path projectPath;
private boolean exitWhenFinish;
private static final PathMatcher JAR_EXTENSION_MATCHER =
FileSystems.getDefault().getPathMatcher("glob:**.jar");
@CommandLine.Parameters(description = "Program arguments")
private final List<String> argList = new ArrayList<>();
@CommandLine.Option(names = {"--help", "-h", "?"}, hidden = true)
private boolean helpFlag;
@CommandLine.Option(names = {"--offline"}, description = "Builds offline without downloading dependencies and " +
"then run.")
private Boolean offline;
@CommandLine.Option(names = "--debug", hidden = true)
private String debugPort;
@CommandLine.Option(names = "--dump-bir", hidden = true)
private boolean dumpBIR;
@CommandLine.Option(names = "--observability-included", description = "package observability in the executable " +
"when run is used with a source file or a module.")
private Boolean observabilityIncluded;
@CommandLine.Option(names = "--remote-management", description = "enable management service in the " +
"executable when run is used with a source file or a module.")
private Boolean remoteManagement;
@CommandLine.Option(names = "--sticky", description = "stick to exact versions locked (if exists)")
private Boolean sticky;
@CommandLine.Option(names = "--dump-graph", description = "Print the dependency graph.", hidden = true)
private boolean dumpGraph;
@CommandLine.Option(names = "--dump-raw-graphs", description = "Print all intermediate graphs created in the " +
"dependency resolution process.", hidden = true)
private boolean dumpRawGraphs;
@CommandLine.Option(names = "--generate-config-schema", hidden = true)
private Boolean configSchemaGen;
@CommandLine.Option(names = "--target-dir", description = "target directory path")
private Path targetDir;
@CommandLine.Option(names = "--enable-cache", description = "enable caches for the compilation", hidden = true)
private Boolean enableCache;
@CommandLine.Option(names = "--disable-syntax-tree-caching", hidden = true, description = "disable syntax tree " +
"caching for source files", defaultValue = "false")
private Boolean disableSyntaxTreeCaching;
@CommandLine.Option(names = "--dump-build-time", description = "calculate and dump build time", hidden = true)
private Boolean dumpBuildTime;
private static final String runCmd =
"""
bal run [--debug <port>] <executable-jar>\s
bal run [--offline]
[<ballerina-file | package-path>] [-- program-args...]
\s""";
public RunCommand() {
this.projectPath = Paths.get(System.getProperty(ProjectConstants.USER_DIR));
this.outStream = System.err;
this.errStream = System.err;
}
RunCommand(Path projectPath, PrintStream outStream, boolean exitWhenFinish) {
this.projectPath = projectPath;
this.exitWhenFinish = exitWhenFinish;
this.outStream = outStream;
this.errStream = outStream;
this.offline = true;
}
RunCommand(Path projectPath, PrintStream outStream, boolean exitWhenFinish, Path targetDir) {
this.projectPath = projectPath;
this.exitWhenFinish = exitWhenFinish;
this.outStream = outStream;
this.errStream = outStream;
this.targetDir = targetDir;
this.offline = true;
}
@Override
public String getName() {
return RUN_COMMAND;
}
@Override
public void printLongDesc(StringBuilder out) {
out.append(BLauncherCmd.getCommandUsageInfo(RUN_COMMAND));
}
@Override
public void printUsage(StringBuilder out) {
out.append(" bal run [--debug <port>] <executable-jar>\n");
out.append("""
bal run [--offline] [<balfile> | <project-path>]
[--] [args...]\s
""");
}
@Override
public void setParentCmdParser(CommandLine parentCmdParser) {
}
private BuildOptions constructBuildOptions() {
BuildOptions.BuildOptionsBuilder buildOptionsBuilder = BuildOptions.builder();
buildOptionsBuilder
.setCodeCoverage(false)
.setOffline(offline)
.setSkipTests(true)
.setTestReport(false)
.setObservabilityIncluded(observabilityIncluded)
.setRemoteManagement(remoteManagement)
.setSticky(sticky)
.setDumpGraph(dumpGraph)
.setDumpRawGraphs(dumpRawGraphs)
.setConfigSchemaGen(configSchemaGen)
.disableSyntaxTreeCaching(disableSyntaxTreeCaching)
.setDumpBuildTime(dumpBuildTime);
if (targetDir != null) {
buildOptionsBuilder.targetDir(targetDir.toString());
}
return buildOptionsBuilder.build();
}
}
|
class RunCommand implements BLauncherCmd {
private final PrintStream outStream;
private final PrintStream errStream;
private Path projectPath;
private boolean exitWhenFinish;
private static final PathMatcher JAR_EXTENSION_MATCHER =
FileSystems.getDefault().getPathMatcher("glob:**.jar");
@CommandLine.Parameters(description = "Program arguments")
private final List<String> argList = new ArrayList<>();
@CommandLine.Option(names = {"--help", "-h", "?"}, hidden = true)
private boolean helpFlag;
@CommandLine.Option(names = {"--offline"}, description = "Builds offline without downloading dependencies and " +
"then run.")
private Boolean offline;
@CommandLine.Option(names = "--debug", hidden = true)
private String debugPort;
@CommandLine.Option(names = "--dump-bir", hidden = true)
private boolean dumpBIR;
@CommandLine.Option(names = "--observability-included", description = "package observability in the executable " +
"when run is used with a source file or a module.")
private Boolean observabilityIncluded;
@CommandLine.Option(names = "--remote-management", description = "enable management service in the " +
"executable when run is used with a source file or a module.")
private Boolean remoteManagement;
@CommandLine.Option(names = "--sticky", description = "stick to exact versions locked (if exists)")
private Boolean sticky;
@CommandLine.Option(names = "--dump-graph", description = "Print the dependency graph.", hidden = true)
private boolean dumpGraph;
@CommandLine.Option(names = "--dump-raw-graphs", description = "Print all intermediate graphs created in the " +
"dependency resolution process.", hidden = true)
private boolean dumpRawGraphs;
@CommandLine.Option(names = "--generate-config-schema", hidden = true)
private Boolean configSchemaGen;
@CommandLine.Option(names = "--target-dir", description = "target directory path")
private Path targetDir;
@CommandLine.Option(names = "--enable-cache", description = "enable caches for the compilation", hidden = true)
private Boolean enableCache;
@CommandLine.Option(names = "--disable-syntax-tree-caching", hidden = true, description = "disable syntax tree " +
"caching for source files", defaultValue = "false")
private Boolean disableSyntaxTreeCaching;
@CommandLine.Option(names = "--dump-build-time", description = "calculate and dump build time", hidden = true)
private Boolean dumpBuildTime;
@CommandLine.Option(names = "--show-dependency-diagnostics", description = "Show the diagnostics " +
"generated by the dependencies")
private Boolean showDependencyDiagnostics;
private static final String runCmd =
"""
bal run [--debug <port>] <executable-jar>\s
bal run [--offline]
[<ballerina-file | package-path>] [-- program-args...]
\s""";
public RunCommand() {
this.projectPath = Paths.get(System.getProperty(ProjectConstants.USER_DIR));
this.outStream = System.err;
this.errStream = System.err;
}
RunCommand(Path projectPath, PrintStream outStream, boolean exitWhenFinish) {
this.projectPath = projectPath;
this.exitWhenFinish = exitWhenFinish;
this.outStream = outStream;
this.errStream = outStream;
this.offline = true;
}
RunCommand(Path projectPath, PrintStream outStream, boolean exitWhenFinish, Path targetDir) {
this.projectPath = projectPath;
this.exitWhenFinish = exitWhenFinish;
this.outStream = outStream;
this.errStream = outStream;
this.targetDir = targetDir;
this.offline = true;
}
@Override
public String getName() {
return RUN_COMMAND;
}
@Override
public void printLongDesc(StringBuilder out) {
out.append(BLauncherCmd.getCommandUsageInfo(RUN_COMMAND));
}
@Override
public void printUsage(StringBuilder out) {
out.append(" bal run [--debug <port>] <executable-jar>\n");
out.append("""
bal run [--offline] [<balfile> | <project-path>]
[--] [args...]\s
""");
}
@Override
public void setParentCmdParser(CommandLine parentCmdParser) {
}
private BuildOptions constructBuildOptions() {
BuildOptions.BuildOptionsBuilder buildOptionsBuilder = BuildOptions.builder();
buildOptionsBuilder
.setCodeCoverage(false)
.setOffline(offline)
.setSkipTests(true)
.setTestReport(false)
.setObservabilityIncluded(observabilityIncluded)
.setRemoteManagement(remoteManagement)
.setSticky(sticky)
.setDumpGraph(dumpGraph)
.setDumpRawGraphs(dumpRawGraphs)
.setConfigSchemaGen(configSchemaGen)
.disableSyntaxTreeCaching(disableSyntaxTreeCaching)
.setDumpBuildTime(dumpBuildTime)
.setShowDependencyDiagnostics(showDependencyDiagnostics);
if (targetDir != null) {
buildOptionsBuilder.targetDir(targetDir.toString());
}
return buildOptionsBuilder.build();
}
}
|
Why do we access the parent like this? Does this capture all the possible scenarios?
|
private Optional<NonTerminalNode> getVariableOrObjectFieldNode(NonTerminalNode sNode) {
if (isVariableNode(sNode) || sNode.kind() == SyntaxKind.OBJECT_FIELD) {
return Optional.of(sNode);
} else if (isVariableNode(sNode.parent()) || sNode.parent().kind() == SyntaxKind.OBJECT_FIELD) {
return Optional.of(sNode.parent());
} else if (sNode.kind() == SyntaxKind.LIST_CONSTRUCTOR && sNode.parent().kind() == SyntaxKind.COLLECT_CLAUSE
&& sNode.parent().parent().kind() == SyntaxKind.QUERY_EXPRESSION
&& isVariableNode(sNode.parent().parent().parent())) {
return Optional.of(sNode.parent().parent().parent());
}
return Optional.empty();
}
|
} else if (sNode.kind() == SyntaxKind.LIST_CONSTRUCTOR && sNode.parent().kind() == SyntaxKind.COLLECT_CLAUSE
|
private Optional<NonTerminalNode> getVariableOrObjectFieldNode(NonTerminalNode sNode) {
if (isVariableNode(sNode) || sNode.kind() == SyntaxKind.OBJECT_FIELD) {
return Optional.of(sNode);
} else if (isVariableNode(sNode.parent()) || sNode.parent().kind() == SyntaxKind.OBJECT_FIELD) {
return Optional.of(sNode.parent());
} else if (sNode.parent().kind() == SyntaxKind.COLLECT_CLAUSE
&& sNode.parent().parent().kind() == SyntaxKind.QUERY_EXPRESSION
&& isVariableNode(sNode.parent().parent().parent())) {
return Optional.of(sNode.parent().parent().parent());
}
return Optional.empty();
}
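On the reviewer's question: chained `parent()` calls match exactly one ancestor shape and can throw a `NullPointerException` near the tree root. A more defensive alternative is a bounded upward walk with a predicate; a minimal sketch over a simplified node type (illustrative, not the Ballerina syntax-tree API):

```java
import java.util.Optional;
import java.util.function.Predicate;

public class AncestorLookupSketch {
    static class Node {
        final String kind;
        final Node parent;
        Node(String kind, Node parent) { this.kind = kind; this.parent = parent; }
    }

    // Walks at most maxDepth levels up, tolerating null parents.
    static Optional<Node> findAncestor(Node start, Predicate<Node> matches, int maxDepth) {
        Node current = start;
        for (int i = 0; i < maxDepth && current != null; i++) {
            if (matches.test(current)) {
                return Optional.of(current);
            }
            current = current.parent;
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Node varDecl = new Node("LOCAL_VAR_DECL", null);
        Node query = new Node("QUERY_EXPRESSION", varDecl);
        Node collect = new Node("COLLECT_CLAUSE", query);
        Node list = new Node("LIST_CONSTRUCTOR", collect);
        System.out.println(findAncestor(list, n -> n.kind.equals("LOCAL_VAR_DECL"), 4).isPresent()); // true
    }
}
```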
|
class ChangeVariableTypeCodeAction extends TypeCastCodeAction {
public static final String NAME = "Change Variable Type";
public static final Set<String> DIAGNOSTIC_CODES = Set.of("BCE2066", "BCE2068", "BCE2652", "BCE3931");
@Override
public boolean validate(Diagnostic diagnostic, DiagBasedPositionDetails positionDetails,
CodeActionContext context) {
return DIAGNOSTIC_CODES.contains(diagnostic.diagnosticInfo().code()) &&
CodeActionNodeValidator.validate(context.nodeAtRange());
}
/**
* {@inheritDoc}
*/
@Override
public List<CodeAction> getCodeActions(Diagnostic diagnostic,
DiagBasedPositionDetails positionDetails,
CodeActionContext context) {
Optional<TypeSymbol> foundType;
if ("BCE2068".equals(diagnostic.diagnosticInfo().code())) {
foundType = positionDetails.diagnosticProperty(
CodeActionUtil.getDiagPropertyFilterFunction(
DiagBasedPositionDetails.DIAG_PROP_INCOMPATIBLE_TYPES_FOUND_SYMBOL_INDEX));
} else {
foundType = positionDetails.diagnosticProperty(
DiagBasedPositionDetails.DIAG_PROP_INCOMPATIBLE_TYPES_FOUND_SYMBOL_INDEX);
}
if (foundType.isEmpty() || !isValidType(foundType.get())) {
return Collections.emptyList();
}
Optional<NonTerminalNode> variableNode = getVariableOrObjectFieldNode(positionDetails.matchedNode());
if (variableNode.isEmpty()) {
return Collections.emptyList();
}
Optional<Node> typeNode = getTypeNode(variableNode.get(), context);
Optional<String> variableName = getVariableName(variableNode.get());
if (typeNode.isEmpty() || variableName.isEmpty()) {
return Collections.emptyList();
}
Optional<String> typeNodeStr = getTypeNodeStr(typeNode.get());
List<CodeAction> actions = new ArrayList<>();
List<TextEdit> importEdits = new ArrayList<>();
List<String> types;
if ("BCE3931".equals(diagnostic.diagnosticInfo().code())) {
types = Collections.singletonList(((TypeDescTypeSymbol) foundType.get()).typeParameter().get().signature());
} else {
types = CodeActionUtil.getPossibleTypes(foundType.get(), importEdits, context);
}
for (String type : types) {
String typeName = FunctionGenerator.processModuleIDsInText(new ImportsAcceptor(context), type, context);
if (typeNodeStr.isPresent() && typeNodeStr.get().equals(typeName)) {
continue;
}
List<TextEdit> edits = new ArrayList<>();
edits.add(new TextEdit(PositionUtil.toRange(typeNode.get().lineRange()), typeName));
String commandTitle;
if (variableNode.get().kind() == SyntaxKind.CONST_DECLARATION) {
commandTitle = String.format(CommandConstants.CHANGE_CONST_TYPE_TITLE, variableName.get(), typeName);
} else {
commandTitle = String.format(CommandConstants.CHANGE_VAR_TYPE_TITLE, variableName.get(), typeName);
}
edits.addAll(importEdits);
actions.add(CodeActionUtil
.createCodeAction(commandTitle, edits, context.fileUri(), CodeActionKind.QuickFix));
}
return actions;
}
@Override
public String getName() {
return NAME;
}
boolean isVariableNode(NonTerminalNode sNode) {
if (sNode == null || sNode.kind() == SyntaxKind.POSITIONAL_ARG || sNode.kind() == SyntaxKind.NAMED_ARG) {
return false;
}
return sNode.kind() == SyntaxKind.LOCAL_VAR_DECL
|| sNode.kind() == SyntaxKind.MODULE_VAR_DECL
|| sNode.kind() == SyntaxKind.ASSIGNMENT_STATEMENT
|| sNode.kind() == SyntaxKind.CONST_DECLARATION
|| sNode.kind() == SyntaxKind.LET_VAR_DECL;
}
private Optional<String> getTypeNodeStr(Node node) {
if (node.kind() == SyntaxKind.SIMPLE_NAME_REFERENCE) {
SimpleNameReferenceNode sRefNode = (SimpleNameReferenceNode) node;
return Optional.of(sRefNode.name().text());
} else if (node.kind() == SyntaxKind.QUALIFIED_NAME_REFERENCE) {
QualifiedNameReferenceNode qnRefNode = (QualifiedNameReferenceNode) node;
return Optional.of(qnRefNode.modulePrefix().text() + ":" + qnRefNode.identifier().text());
} else if (node instanceof BuiltinSimpleNameReferenceNode) {
return Optional.of(((BuiltinSimpleNameReferenceNode) node).name().text());
}
return Optional.empty();
}
private Optional<Node> getTypeNode(Node matchedNode, CodeActionContext context) {
switch (matchedNode.kind()) {
case LOCAL_VAR_DECL:
return Optional.of(
((VariableDeclarationNode) matchedNode).typedBindingPattern().typeDescriptor());
case MODULE_VAR_DECL:
return Optional.of(
((ModuleVariableDeclarationNode) matchedNode).typedBindingPattern().typeDescriptor());
case ASSIGNMENT_STATEMENT:
Optional<VariableSymbol> optVariableSymbol = getVariableSymbol(context, matchedNode);
if (optVariableSymbol.isEmpty()) {
return Optional.empty();
}
SyntaxTree syntaxTree = context.currentSyntaxTree().orElseThrow();
Optional<NonTerminalNode> node = CommonUtil.findNode(optVariableSymbol.get(), syntaxTree);
if (node.isPresent() && node.get().kind() == SyntaxKind.TYPED_BINDING_PATTERN) {
return Optional.of(((TypedBindingPatternNode) node.get()).typeDescriptor());
} else {
return Optional.empty();
}
case CONST_DECLARATION:
ConstantDeclarationNode constDecl = (ConstantDeclarationNode) matchedNode;
return Optional.ofNullable(constDecl.typeDescriptor().orElse(null));
case OBJECT_FIELD:
return Optional.of(((ObjectFieldNode) matchedNode).typeName());
case LET_VAR_DECL:
return Optional.ofNullable(((LetVariableDeclarationNode) matchedNode)
.typedBindingPattern().typeDescriptor());
default:
return Optional.empty();
}
}
private Optional<String> getVariableName(Node matchedNode) {
switch (matchedNode.kind()) {
case LOCAL_VAR_DECL:
VariableDeclarationNode variableDeclrNode = (VariableDeclarationNode) matchedNode;
BindingPatternNode bindingPatternNode = variableDeclrNode.typedBindingPattern().bindingPattern();
if (bindingPatternNode.kind() != SyntaxKind.CAPTURE_BINDING_PATTERN) {
return Optional.empty();
}
CaptureBindingPatternNode captureBindingPatternNode = (CaptureBindingPatternNode) bindingPatternNode;
return Optional.of(captureBindingPatternNode.variableName().text());
case MODULE_VAR_DECL:
ModuleVariableDeclarationNode modVarDecl = (ModuleVariableDeclarationNode) matchedNode;
BindingPatternNode bindingPattern = modVarDecl.typedBindingPattern().bindingPattern();
if (bindingPattern.kind() != SyntaxKind.CAPTURE_BINDING_PATTERN) {
return Optional.empty();
}
return Optional.of(((CaptureBindingPatternNode) bindingPattern).variableName().text());
case ASSIGNMENT_STATEMENT:
AssignmentStatementNode assignmentStmtNode = (AssignmentStatementNode) matchedNode;
Node varRef = assignmentStmtNode.varRef();
if (varRef.kind() == SyntaxKind.SIMPLE_NAME_REFERENCE) {
return Optional.of(((SimpleNameReferenceNode) varRef).name().text());
} else if (varRef.kind() == SyntaxKind.QUALIFIED_NAME_REFERENCE) {
return Optional.of(((QualifiedNameReferenceNode) varRef).identifier().text());
}
return Optional.empty();
case CONST_DECLARATION:
ConstantDeclarationNode constantDecl = (ConstantDeclarationNode) matchedNode;
return Optional.of(constantDecl.variableName().text());
case OBJECT_FIELD:
ObjectFieldNode objectFieldNode = (ObjectFieldNode) matchedNode;
return Optional.of(objectFieldNode.fieldName().text());
case LET_VAR_DECL:
LetVariableDeclarationNode variableDecl = (LetVariableDeclarationNode) matchedNode;
BindingPatternNode node = variableDecl.typedBindingPattern().bindingPattern();
return Optional.of(((CaptureBindingPatternNode) node).variableName().text());
default:
return Optional.empty();
}
}
private boolean isValidType(TypeSymbol typeSymbol) {
if (typeSymbol.typeKind() == TypeDescKind.COMPILATION_ERROR || typeSymbol.typeKind() == TypeDescKind.NONE) {
return false;
}
if (typeSymbol.typeKind() == TypeDescKind.MAP) {
return ((MapTypeSymbol) typeSymbol).typeParam().typeKind() != TypeDescKind.COMPILATION_ERROR;
}
if (typeSymbol.typeKind() == TypeDescKind.TABLE) {
return ((TableTypeSymbol) typeSymbol).rowTypeParameter().typeKind() != TypeDescKind.COMPILATION_ERROR;
}
return true;
}
}
|
class ChangeVariableTypeCodeAction extends TypeCastCodeAction {
public static final String NAME = "Change Variable Type";
public static final Set<String> DIAGNOSTIC_CODES = Set.of("BCE2066", "BCE2068", "BCE2652", "BCE3931");
@Override
public boolean validate(Diagnostic diagnostic, DiagBasedPositionDetails positionDetails,
CodeActionContext context) {
return DIAGNOSTIC_CODES.contains(diagnostic.diagnosticInfo().code()) &&
CodeActionNodeValidator.validate(context.nodeAtRange());
}
/**
* {@inheritDoc}
*/
@Override
public List<CodeAction> getCodeActions(Diagnostic diagnostic,
DiagBasedPositionDetails positionDetails,
CodeActionContext context) {
Optional<TypeSymbol> foundType;
if ("BCE2068".equals(diagnostic.diagnosticInfo().code())) {
foundType = positionDetails.diagnosticProperty(
CodeActionUtil.getDiagPropertyFilterFunction(
DiagBasedPositionDetails.DIAG_PROP_INCOMPATIBLE_TYPES_FOUND_SYMBOL_INDEX));
} else {
foundType = positionDetails.diagnosticProperty(
DiagBasedPositionDetails.DIAG_PROP_INCOMPATIBLE_TYPES_FOUND_SYMBOL_INDEX);
}
if (foundType.isEmpty() || !isValidType(foundType.get())) {
return Collections.emptyList();
}
Optional<NonTerminalNode> variableNode = getVariableOrObjectFieldNode(positionDetails.matchedNode());
if (variableNode.isEmpty()) {
return Collections.emptyList();
}
Optional<Node> typeNode = getTypeNode(variableNode.get(), context);
Optional<String> variableName = getVariableName(variableNode.get());
if (typeNode.isEmpty() || variableName.isEmpty()) {
return Collections.emptyList();
}
Optional<String> typeNodeStr = getTypeNodeStr(typeNode.get());
List<CodeAction> actions = new ArrayList<>();
List<TextEdit> importEdits = new ArrayList<>();
List<String> types;
if ("BCE3931".equals(diagnostic.diagnosticInfo().code())) {
types = Collections.singletonList(((TypeDescTypeSymbol) foundType.get()).typeParameter().get().signature());
} else {
types = CodeActionUtil.getPossibleTypes(foundType.get(), importEdits, context);
}
for (String type : types) {
String typeName = FunctionGenerator.processModuleIDsInText(new ImportsAcceptor(context), type, context);
if (typeNodeStr.isPresent() && typeNodeStr.get().equals(typeName)) {
continue;
}
List<TextEdit> edits = new ArrayList<>();
edits.add(new TextEdit(PositionUtil.toRange(typeNode.get().lineRange()), typeName));
String commandTitle;
if (variableNode.get().kind() == SyntaxKind.CONST_DECLARATION) {
commandTitle = String.format(CommandConstants.CHANGE_CONST_TYPE_TITLE, variableName.get(), typeName);
} else {
commandTitle = String.format(CommandConstants.CHANGE_VAR_TYPE_TITLE, variableName.get(), typeName);
}
edits.addAll(importEdits);
actions.add(CodeActionUtil
.createCodeAction(commandTitle, edits, context.fileUri(), CodeActionKind.QuickFix));
}
return actions;
}
@Override
public String getName() {
return NAME;
}
boolean isVariableNode(NonTerminalNode sNode) {
if (sNode == null || sNode.kind() == SyntaxKind.POSITIONAL_ARG || sNode.kind() == SyntaxKind.NAMED_ARG) {
return false;
}
return sNode.kind() == SyntaxKind.LOCAL_VAR_DECL
|| sNode.kind() == SyntaxKind.MODULE_VAR_DECL
|| sNode.kind() == SyntaxKind.ASSIGNMENT_STATEMENT
|| sNode.kind() == SyntaxKind.CONST_DECLARATION
|| sNode.kind() == SyntaxKind.LET_VAR_DECL;
}
private Optional<String> getTypeNodeStr(Node node) {
if (node.kind() == SyntaxKind.SIMPLE_NAME_REFERENCE) {
SimpleNameReferenceNode sRefNode = (SimpleNameReferenceNode) node;
return Optional.of(sRefNode.name().text());
} else if (node.kind() == SyntaxKind.QUALIFIED_NAME_REFERENCE) {
QualifiedNameReferenceNode qnRefNode = (QualifiedNameReferenceNode) node;
return Optional.of(qnRefNode.modulePrefix().text() + ":" + qnRefNode.identifier().text());
} else if (node instanceof BuiltinSimpleNameReferenceNode) {
return Optional.of(((BuiltinSimpleNameReferenceNode) node).name().text());
}
return Optional.empty();
}
private Optional<Node> getTypeNode(Node matchedNode, CodeActionContext context) {
switch (matchedNode.kind()) {
case LOCAL_VAR_DECL:
return Optional.of(
((VariableDeclarationNode) matchedNode).typedBindingPattern().typeDescriptor());
case MODULE_VAR_DECL:
return Optional.of(
((ModuleVariableDeclarationNode) matchedNode).typedBindingPattern().typeDescriptor());
case ASSIGNMENT_STATEMENT:
Optional<VariableSymbol> optVariableSymbol = getVariableSymbol(context, matchedNode);
if (optVariableSymbol.isEmpty()) {
return Optional.empty();
}
SyntaxTree syntaxTree = context.currentSyntaxTree().orElseThrow();
Optional<NonTerminalNode> node = CommonUtil.findNode(optVariableSymbol.get(), syntaxTree);
if (node.isPresent() && node.get().kind() == SyntaxKind.TYPED_BINDING_PATTERN) {
return Optional.of(((TypedBindingPatternNode) node.get()).typeDescriptor());
} else {
return Optional.empty();
}
case CONST_DECLARATION:
ConstantDeclarationNode constDecl = (ConstantDeclarationNode) matchedNode;
return Optional.ofNullable(constDecl.typeDescriptor().orElse(null));
case OBJECT_FIELD:
return Optional.of(((ObjectFieldNode) matchedNode).typeName());
case LET_VAR_DECL:
return Optional.ofNullable(((LetVariableDeclarationNode) matchedNode)
.typedBindingPattern().typeDescriptor());
default:
return Optional.empty();
}
}
private Optional<String> getVariableName(Node matchedNode) {
switch (matchedNode.kind()) {
case LOCAL_VAR_DECL:
VariableDeclarationNode variableDeclrNode = (VariableDeclarationNode) matchedNode;
BindingPatternNode bindingPatternNode = variableDeclrNode.typedBindingPattern().bindingPattern();
if (bindingPatternNode.kind() != SyntaxKind.CAPTURE_BINDING_PATTERN) {
return Optional.empty();
}
CaptureBindingPatternNode captureBindingPatternNode = (CaptureBindingPatternNode) bindingPatternNode;
return Optional.of(captureBindingPatternNode.variableName().text());
case MODULE_VAR_DECL:
ModuleVariableDeclarationNode modVarDecl = (ModuleVariableDeclarationNode) matchedNode;
BindingPatternNode bindingPattern = modVarDecl.typedBindingPattern().bindingPattern();
if (bindingPattern.kind() != SyntaxKind.CAPTURE_BINDING_PATTERN) {
return Optional.empty();
}
return Optional.of(((CaptureBindingPatternNode) bindingPattern).variableName().text());
case ASSIGNMENT_STATEMENT:
AssignmentStatementNode assignmentStmtNode = (AssignmentStatementNode) matchedNode;
Node varRef = assignmentStmtNode.varRef();
if (varRef.kind() == SyntaxKind.SIMPLE_NAME_REFERENCE) {
return Optional.of(((SimpleNameReferenceNode) varRef).name().text());
} else if (varRef.kind() == SyntaxKind.QUALIFIED_NAME_REFERENCE) {
return Optional.of(((QualifiedNameReferenceNode) varRef).identifier().text());
}
return Optional.empty();
case CONST_DECLARATION:
ConstantDeclarationNode constantDecl = (ConstantDeclarationNode) matchedNode;
return Optional.of(constantDecl.variableName().text());
case OBJECT_FIELD:
ObjectFieldNode objectFieldNode = (ObjectFieldNode) matchedNode;
return Optional.of(objectFieldNode.fieldName().text());
case LET_VAR_DECL:
LetVariableDeclarationNode variableDecl = (LetVariableDeclarationNode) matchedNode;
BindingPatternNode node = variableDecl.typedBindingPattern().bindingPattern();
return Optional.of(((CaptureBindingPatternNode) node).variableName().text());
default:
return Optional.empty();
}
}
private boolean isValidType(TypeSymbol typeSymbol) {
if (typeSymbol.typeKind() == TypeDescKind.COMPILATION_ERROR || typeSymbol.typeKind() == TypeDescKind.NONE) {
return false;
}
if (typeSymbol.typeKind() == TypeDescKind.MAP) {
return ((MapTypeSymbol) typeSymbol).typeParam().typeKind() != TypeDescKind.COMPILATION_ERROR;
}
if (typeSymbol.typeKind() == TypeDescKind.TABLE) {
return ((TableTypeSymbol) typeSymbol).rowTypeParameter().typeKind() != TypeDescKind.COMPILATION_ERROR;
}
return true;
}
}
|
Out of curiosity, what was the issue here?
|
public void emitRecord(StreamRecord<E> streamRecord) throws Exception {
events.add(streamRecord.copy(streamRecord.getValue()));
}
|
events.add(streamRecord.copy(streamRecord.getValue()));
|
public void emitRecord(StreamRecord<E> streamRecord) throws Exception {
events.add(streamRecord.copy(streamRecord.getValue()));
}
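On the question above: stream records in runtimes like this are commonly reused as mutable containers, so storing the record itself would let the next emit overwrite earlier events; copying captures a stable snapshot. A minimal sketch of the aliasing hazard, with an illustrative `Record` type:

```java
import java.util.ArrayList;
import java.util.List;

public class RecordReuseSketch {
    static class Record {
        String value;
        Record copy() { Record r = new Record(); r.value = this.value; return r; }
    }

    public static void main(String[] args) {
        List<Record> aliased = new ArrayList<>();
        List<Record> copied = new ArrayList<>();

        Record reused = new Record(); // the runtime reuses one instance
        reused.value = "first";
        aliased.add(reused);
        copied.add(reused.copy());

        reused.value = "second"; // the next emit mutates the same instance
        System.out.println(aliased.get(0).value); // second: the first event was clobbered
        System.out.println(copied.get(0).value);  // first: the copy is stable
    }
}
```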
|
class CollectingDataOutput<E> implements PushingAsyncDataInput.DataOutput<E> {
final List<Object> events = new ArrayList<>();
@Override
public void emitWatermark(Watermark watermark) throws Exception {
events.add(watermark);
}
@Override
public void emitWatermarkStatus(WatermarkStatus watermarkStatus) throws Exception {
events.add(watermarkStatus);
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) throws Exception {
events.add(latencyMarker);
}
public List<Object> getEvents() {
return events;
}
}
|
class CollectingDataOutput<E> implements PushingAsyncDataInput.DataOutput<E> {
final List<Object> events = new ArrayList<>();
@Override
public void emitWatermark(Watermark watermark) throws Exception {
events.add(watermark);
}
@Override
public void emitWatermarkStatus(WatermarkStatus watermarkStatus) throws Exception {
events.add(watermarkStatus);
}
@Override
public void emitLatencyMarker(LatencyMarker latencyMarker) throws Exception {
events.add(latencyMarker);
}
public List<Object> getEvents() {
return events;
}
}
|
OK, keep it for now.
|
public void assertHasUncompletedJob() throws IOException, InvocationTargetException, NoSuchMethodException, IllegalAccessException {
final JobConfiguration jobConfiguration = JobConfigurationBuilder.createJobConfiguration();
RuleAlteredJobContext jobContext = new RuleAlteredJobContext(jobConfiguration);
JobProgress finishProcess = new JobProgress();
finishProcess.setStatus(JobStatus.FINISHED);
jobContext.setInitProgress(finishProcess);
GovernanceRepositoryAPI repositoryAPI = PipelineAPIFactory.getGovernanceRepositoryAPI();
repositoryAPI.persistJobProgress(jobContext);
URL jobConfigUrl = getClass().getClassLoader().getResource("scaling/rule_alter/scaling_job_config.yaml");
assertNotNull(jobConfigUrl);
repositoryAPI.persist(PipelineMetaDataNode.getJobConfigPath("0130317c30317c3054317c6c6f6769635f6462"), FileUtils.readFileToString(new File(jobConfigUrl.getFile())));
Object result = ReflectionUtil.invokeMethod(new RuleAlteredJobWorker(), "isUncompletedJobOfSameSchemaInJobList", new Class[]{String.class},
new String[]{jobConfiguration.getWorkflowConfig().getSchemaName()});
assertTrue((Boolean) result);
}
|
assertNotNull(jobConfigUrl);
|
public void assertHasUncompletedJob() throws InvocationTargetException, NoSuchMethodException, IllegalAccessException, IOException {
final JobConfiguration jobConfiguration = JobConfigurationBuilder.createJobConfiguration();
RuleAlteredJobContext jobContext = new RuleAlteredJobContext(jobConfiguration);
jobContext.setStatus(JobStatus.PREPARING);
GovernanceRepositoryAPI repositoryAPI = PipelineAPIFactory.getGovernanceRepositoryAPI();
repositoryAPI.persistJobProgress(jobContext);
URL jobConfigUrl = getClass().getClassLoader().getResource("scaling/rule_alter/scaling_job_config.yaml");
assertNotNull(jobConfigUrl);
repositoryAPI.persist(PipelineMetaDataNode.getJobConfigPath(jobContext.getJobId()), FileUtils.readFileToString(new File(jobConfigUrl.getFile())));
Object result = ReflectionUtil.invokeMethod(new RuleAlteredJobWorker(), "isUncompletedJobOfSameSchemaInJobList", new Class[]{String.class},
new String[]{jobConfiguration.getWorkflowConfig().getSchemaName()});
assertFalse((Boolean) result);
}
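The test invokes a private method through a reflection helper. A minimal sketch of the same call using plain `java.lang.reflect`, with a hypothetical `Worker#greet(String)` private method:

```java
import java.lang.reflect.Method;

public class PrivateInvokeSketch {
    static class Worker {
        private String greet(String name) { return "hello " + name; }
    }

    public static void main(String[] args) throws Exception {
        Method m = Worker.class.getDeclaredMethod("greet", String.class);
        m.setAccessible(true); // bypass the access check for the private method
        Object result = m.invoke(new Worker(), "scaling");
        System.out.println(result); // hello scaling
    }
}
```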
|
class RuleAlteredJobWorkerTest {
static {
ShardingSphereServiceLoader.register(ShardingRuleAlteredDetector.class);
}
@BeforeClass
public static void beforeClass() {
PipelineContextUtil.mockModeConfigAndContextManager();
}
@Test(expected = PipelineJobCreationException.class)
public void assertCreateRuleAlteredContextNoAlteredRule() {
JobConfiguration jobConfig = JobConfigurationBuilder.createJobConfiguration();
jobConfig.setWorkflowConfig(new WorkflowConfiguration("logic_db", ImmutableMap.of(), 0, 1));
RuleAlteredJobWorker.createRuleAlteredContext(jobConfig);
}
@Test
public void assertCreateRuleAlteredContextSuccess() {
JobConfiguration jobConfig = JobConfigurationBuilder.createJobConfiguration();
RuleAlteredContext ruleAlteredContext = RuleAlteredJobWorker.createRuleAlteredContext(jobConfig);
assertNotNull(ruleAlteredContext.getOnRuleAlteredActionConfig());
}
@Test
public void assertRuleAlteredActionEnabled() {
ShardingRuleConfiguration ruleConfiguration = new ShardingRuleConfiguration();
ruleConfiguration.setScalingName("default_scaling");
assertTrue(RuleAlteredJobWorker.isOnRuleAlteredActionEnabled(ruleConfiguration));
}
@Test
public void assertRuleAlteredActionDisabled() throws IOException, InvocationTargetException, NoSuchMethodException, IllegalAccessException {
URL dataSourceUrl = getClass().getClassLoader().getResource("scaling/detector/datasource_config.yaml");
assertNotNull(dataSourceUrl);
URL sourceRuleUrl = getClass().getClassLoader().getResource("scaling/rule_alter/source_rules_config.yaml");
assertNotNull(sourceRuleUrl);
URL targetRuleUrl = getClass().getClassLoader().getResource("scaling/rule_alter/target_rules_config.yaml");
assertNotNull(targetRuleUrl);
StartScalingEvent startScalingEvent = new StartScalingEvent("logic_db", FileUtils.readFileToString(new File(dataSourceUrl.getFile())),
FileUtils.readFileToString(new File(sourceRuleUrl.getFile())), FileUtils.readFileToString(new File(dataSourceUrl.getFile())),
FileUtils.readFileToString(new File(targetRuleUrl.getFile())), 0, 1);
RuleAlteredJobWorker ruleAlteredJobWorker = new RuleAlteredJobWorker();
Object result = ReflectionUtil.invokeMethod(ruleAlteredJobWorker, "createJobConfig", new Class[]{StartScalingEvent.class}, new Object[]{startScalingEvent});
assertTrue(((Optional<?>) result).isPresent());
}
}
|
class RuleAlteredJobWorkerTest {
@BeforeClass
public static void beforeClass() {
PipelineContextUtil.mockModeConfigAndContextManager();
}
@Test(expected = PipelineJobCreationException.class)
public void assertCreateRuleAlteredContextNoAlteredRule() {
JobConfiguration jobConfig = JobConfigurationBuilder.createJobConfiguration();
jobConfig.setWorkflowConfig(new WorkflowConfiguration("logic_db", ImmutableMap.of(), 0, 1));
RuleAlteredJobWorker.createRuleAlteredContext(jobConfig);
}
@Test
public void assertCreateRuleAlteredContextSuccess() {
assertNotNull(RuleAlteredJobWorker.createRuleAlteredContext(JobConfigurationBuilder.createJobConfiguration()).getOnRuleAlteredActionConfig());
}
@Test
public void assertRuleAlteredActionEnabled() {
ShardingRuleConfiguration ruleConfiguration = new ShardingRuleConfiguration();
ruleConfiguration.setScalingName("default_scaling");
assertTrue(RuleAlteredJobWorker.isOnRuleAlteredActionEnabled(ruleConfiguration));
}
@Test
public void assertRuleAlteredActionDisabled() throws InvocationTargetException, NoSuchMethodException, IllegalAccessException {
ShardingSpherePipelineDataSourceConfiguration pipelineDataSourceConfig = new ShardingSpherePipelineDataSourceConfiguration(
ConfigurationFileUtil.readFile("config_sharding_sphere_jdbc_source.yaml"));
ShardingSpherePipelineDataSourceConfiguration pipelineDataTargetConfig = new ShardingSpherePipelineDataSourceConfiguration(
ConfigurationFileUtil.readFile("config_sharding_sphere_jdbc_target.yaml"));
StartScalingEvent startScalingEvent = new StartScalingEvent("logic_db", YamlEngine.marshal(pipelineDataSourceConfig.getRootConfig().getDataSources()),
YamlEngine.marshal(pipelineDataSourceConfig.getRootConfig().getRules()), YamlEngine.marshal(pipelineDataTargetConfig.getRootConfig().getDataSources()),
YamlEngine.marshal(pipelineDataTargetConfig.getRootConfig().getRules()), 0, 1);
RuleAlteredJobWorker ruleAlteredJobWorker = new RuleAlteredJobWorker();
Object result = ReflectionUtil.invokeMethod(ruleAlteredJobWorker, "createJobConfig", new Class[]{StartScalingEvent.class}, new Object[]{startScalingEvent});
assertTrue(((Optional<?>) result).isPresent());
}
}
|
Awaiting changes: https://github.com/ballerina-platform/ballerina-lang/pull/39509/commits/973652b34a52057d12e7ff7d57cf32da8b7fb56f from #39509.
|
public void testRegexpTemplateLiteralExpr(int sLine, int sCol, int eLine, int eCol) {
TypeSymbol type = getExprType(sLine, sCol, eLine, eCol);
assertEquals(type.typeKind(), TYPE_REFERENCE);
assertEquals(((TypeReferenceTypeSymbol) type).typeDescriptor().typeKind(), REGEXP);
}
|
assertEquals(type.typeKind(), TYPE_REFERENCE);
|
public void testRegexpTemplateLiteralExpr(int sLine, int sCol, int eLine, int eCol) {
TypeSymbol type = getExprType(sLine, sCol, eLine, eCol);
assertEquals(type.typeKind(), TYPE_REFERENCE);
assertEquals(((TypeReferenceTypeSymbol) type).typeDescriptor().typeKind(), REGEXP);
}
|
class ExpressionTypeTestNew {
private SemanticModel model;
@BeforeClass
public void setup() {
model = SemanticAPITestUtils.getDefaultModulesSemanticModel("test-src/expressions_test.bal");
}
@Test(dataProvider = "LiteralPosProvider")
public void testLiterals(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eLine, eCol, kind);
}
@DataProvider(name = "LiteralPosProvider")
public Object[][] getLiteralPos() {
return new Object[][]{
{17, 21, 17, 22, INT},
{17, 24, 17, 29, FLOAT},
{17, 31, 17, 36, DECIMAL},
{17, 38, 17, 42, BOOLEAN},
{17, 44, 17, 46, NIL},
{17, 48, 17, 53, STRING},
{18, 13, 18, 17, NIL},
};
}
@Test
public void testByteLiteral() {
TypeSymbol type = getExprType(19, 15, 19, 44);
assertEquals(type.typeKind(), ARRAY);
assertEquals(((ArrayTypeSymbol) type).memberTypeDescriptor().typeKind(), BYTE);
}
@Test
public void testStringTemplateExpr() {
assertType(23, 15, 23, 36, STRING);
}
@Test
public void testXMLTemplateExpr() {
TypeSymbol type = getExprType(24, 12, 24, 45);
assertEquals(type.typeKind(), TYPE_REFERENCE);
assertEquals(((TypeReferenceTypeSymbol) type).typeDescriptor().typeKind(), XML_ELEMENT);
}
@Test
public void testRawTemplate() {
TypeSymbol type = getExprType(25, 29, 25, 50);
assertEquals(type.typeKind(), TYPE_REFERENCE);
TypeSymbol objType = ((TypeReferenceTypeSymbol) type).typeDescriptor();
assertEquals(objType.typeKind(), OBJECT);
type = getExprType(25, 32, 25, 33);
assertEquals(type.typeKind(), STRING);
}
@Test
public void testArrayLiteral() {
TypeSymbol type = getExprType(29, 20, 29, 34);
assertEquals(type.typeKind(), ARRAY);
TypeSymbol memberType = ((ArrayTypeSymbol) type).memberTypeDescriptor();
assertEquals(memberType.typeKind(), STRING);
}
@Test(dataProvider = "RegexpTemplateLiteralPosProvider")
@DataProvider(name = "RegexpTemplateLiteralPosProvider")
private Object[][] getRegexpTemplateLiteralPos() {
return new Object[][] {
{362, 8, 362, 22},
{363, 8, 363, 13},
{364, 8, 364, 14},
{365, 8, 365, 20},
};
}
@Test(dataProvider = "TupleLiteralPosProvider")
public void testTupleLiteral(int sLine, int sCol, int eLine, int eCol, List<TypeDescKind> memberKinds) {
TypeSymbol type = getExprType(sLine, sCol, eLine, eCol);
assertEquals(type.typeKind(), TUPLE);
List<TypeSymbol> memberTypes = ((TupleTypeSymbol) type).memberTypeDescriptors();
assertEquals(memberTypes.size(), memberKinds.size());
for (int i = 0; i < memberTypes.size(); i++) {
TypeSymbol memberType = memberTypes.get(i);
assertEquals(memberType.typeKind(), memberKinds.get(i));
}
}
@DataProvider(name = "TupleLiteralPosProvider")
public Object[][] getTuplePos() {
return new Object[][]{
{30, 15, 30, 27, List.of(INT, INT, INT)},
{32, 31, 32, 49, List.of(INT, STRING, FLOAT)},
};
}
@Test
public void testMapLiteral() {
TypeSymbol type = getExprType(34, 20, 34, 34);
assertEquals(type.typeKind(), MAP);
TypeSymbol constraint = ((MapTypeSymbol) type).typeParam();
assertEquals(constraint.typeKind(), STRING);
assertType(34, 28, 34, 33, STRING);
}
@Test
public void testInferredMappingConstructorType() {
TypeSymbol type = getExprType(35, 13, 35, 43);
assertEquals(type.typeKind(), RECORD);
assertType(35, 14, 35, 20, STRING);
assertType(35, 22, 35, 31, STRING);
assertType(35, 33, 35, 39, STRING);
assertType(35, 41, 35, 42, INT);
}
@Test
public void testRecordLiteral() {
TypeSymbol type = getExprType(40, 16, 40, 43);
assertEquals(type.typeKind(), RECORD);
assertType(40, 17, 21, null);
assertType(40, 23, 40, 33, STRING);
assertType(40, 35, 38, null);
assertType(40, 40, 40, 42, INT);
}
@Test
public void testJSONObject() {
TypeSymbol type = getExprType(42, 13, 42, 40);
assertEquals(type.typeKind(), MAP);
TypeSymbol constraint = ((MapTypeSymbol) type).typeParam();
assertEquals(constraint.typeKind(), JSON);
assertType(42, 14, 18, null);
assertType(42, 20, 30, STRING);
assertType(42, 32, 35, null);
assertType(42, 37, 39, INT);
}
@Test(dataProvider = "AccessPosProvider")
public void testAccessExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eLine, eCol, kind);
}
@DataProvider(name = "AccessPosProvider")
public Object[][] getFieldAccessPos() {
return new Object[][]{
{51, 18, 51, 29, STRING},
{51, 18, 51, 24, RECORD},
{52, 15, 52, 26, UNION},
{52, 15, 52, 21, RECORD},
{53, 21, 53, 35, STRING},
{53, 21, 53, 27, RECORD},
{53, 28, 53, 34, STRING},
};
}
@Test(dataProvider = "TypeInitPosProvider")
public void testObjecTypeInit(int sLine, int sCol, int eLine, int eCol) {
TypeSymbol type = getExprType(sLine, sCol, eLine, eCol);
assertEquals(type.typeKind(), TYPE_REFERENCE);
assertEquals(type.getName().get(), "PersonObj");
assertEquals(((TypeReferenceTypeSymbol) type).typeDescriptor().typeKind(), OBJECT);
}
@DataProvider(name = "TypeInitPosProvider")
public Object[][] getTypeInitPos() {
return new Object[][]{
{57, 19, 57, 33},
{58, 19, 58, 42}
};
}
@Test
public void testArgsInNewExpr() {
assertType(57, 24, 57, 32, STRING);
assertType(58, 33, 58, 41, STRING);
}
@Test
public void testObjectConstructorExpr() {
assertType(64, 15, 68, 5, OBJECT);
assertType(65, 15, 65, 19, null);
assertType(65, 22, 65, 28, STRING);
assertType(67, 45, 67, 54, STRING);
assertType(67, 45, 67, 49, OBJECT);
assertType(67, 50, 67, 54, STRING);
}
@Test(dataProvider = "MiscExprPosProvider")
public void testMiscExprs(int sLine, int sCol, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eCol, kind);
}
@DataProvider(name = "MiscExprPosProvider")
public Object[][] getExprPos() {
return new Object[][]{
{72, 12, 15, INT},
{73, 12, 23, INT},
{73, 12, 19, INT},
{73, 17, 23, null},
{74, 12, 23, INT},
{75, 16, 22, BOOLEAN},
{76, 17, 22, STRING},
{78, 8, 20, BOOLEAN},
{78, 8, 10, ANYDATA},
{78, 14, 20, null},
};
}
@Test(dataProvider = "CheckingExprPosProvider")
public void testCheckingExprs(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eLine, eCol, kind);
}
@DataProvider(name = "CheckingExprPosProvider")
public Object[][] getCheckingExprPos() {
return new Object[][]{
{86, 16, 86, 27, STRING},
{86, 22, 86, 27, UNION},
{87, 16, 87, 32, STRING},
{87, 27, 87, 32, UNION},
};
}
@Test(dataProvider = "CastingExprPosProvider")
public void testCastingExprs(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eLine, eCol, kind);
}
@DataProvider(name = "CastingExprPosProvider")
public Object[][] getCastingExprPos() {
return new Object[][]{
{92, 15, 92, 25, STRING},
{92, 23, 92, 25, ANYDATA},
{93, 12, 93, 30, INT},
{93, 28, 93, 30, ANYDATA},
};
}
@Test
public void testInferredRecordTypeForInvalidExprs() {
assertType(97, 4, 97, 20, RECORD);
}
@Test
public void testStartAction() {
TypeSymbol type = getExprType(101, 4, 101, 21);
assertEquals(type.typeKind(), FUTURE);
assertEquals(((FutureTypeSymbol) type).typeParameter().get().typeKind(), NIL);
assertType(101, 10, 101, 21, NIL);
}
@Test
public void testFutureResultType() {
TypeSymbol type = getExprType(350, 31, 350, 38);
assertEquals(type.typeKind(), FUTURE);
Optional<TypeSymbol> typeParameter = ((FutureTypeSymbol) type).typeParameter();
assertTrue(typeParameter.isPresent());
assertEquals(typeParameter.get().typeKind(), INT);
assertType(354, 17, 354, 24, INT);
}
@Test(dataProvider = "CallExprPosProvider")
public void testFunctionCall(int sLine, int sCol, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eCol, kind);
}
@DataProvider(name = "CallExprPosProvider")
public Object[][] getCallExprPos() {
return new Object[][]{
{109, 4, 10, null},
{109, 4, 9, UNION},
{112, 15, 16, TYPE_REFERENCE},
{112, 15, 27, null},
{112, 15, 26, STRING},
{127, 4, 35, null},
{127, 4, 34, STRING},
{129, 12, 36, INT},
{129, 12, 37, null},
{130, 4, 35, BOOLEAN}
};
}
@Test
public void testExpressionsOfIntersectionTypes() {
assertType(135, 4, 21, INTERSECTION);
assertType(135, 4, 22, null);
assertType(137, 4, 24, null);
assertType(137, 4, 23, INTERSECTION);
assertType(139, 4, 26, UNION);
TypeSymbol t1 = getExprType(139, 4, 139, 26);
assertEquals(t1.typeKind(), UNION);
assertEquals(t1.signature(), "(Foo & readonly)|int|(string[] & readonly)");
assertType(141, 4, 27, null);
TypeSymbol t2 = getExprType(141, 4, 141, 26);
assertEquals(t2.typeKind(), UNION);
assertEquals(t2.signature(), "(int[] & readonly)?");
assertType(143, 4, 27, null);
TypeSymbol t3 = getExprType(143, 4, 143, 26);
assertEquals(t3.typeKind(), UNION);
assertEquals(t3.signature(), "(int[] & readonly)?");
}
@Test
public void testTypeWithinServiceDecl() {
assertType(118, 15, 118, 16, RECORD);
}
@Test
public void testTypeWithinDoAndOnFailClause() {
TypeSymbol exprType = getExprType(164, 16, 164, 23);
assertEquals(exprType.typeKind(), TYPE_REFERENCE);
assertEquals(exprType.getName().get(), "Foo");
exprType = getExprType(166, 12, 166, 42);
assertEquals(exprType.typeKind(), STRING);
}
@Test
public void testFuncCallForDependentlyTypedSignatures() {
TypeSymbol exprType = getExprType(172, 12, 172, 35);
assertEquals(exprType.typeKind(), INT);
}
@Test
public void testTypeOfExprInErroredStmt() {
TypeSymbol type = getExprType(177, 12, 177, 23);
assertEquals(type.typeKind(), UNION);
UnionTypeSymbol union = (UnionTypeSymbol) type;
assertEquals(union.memberTypeDescriptors().get(0).typeKind(), INT);
assertEquals(union.memberTypeDescriptors().get(1).typeKind(), ERROR);
}
@Test(dataProvider = "TableCtrPosProvider")
public void testTableConstructor(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "TableCtrPosProvider")
public Object[][] getTableCtrPos() {
return new Object[][]{
{181, 33, 183, 5, TABLE},
{181, 43, 181, 45, null},
{182, 25, 182, 35, STRING},
{181, 10, 181, 16, null},
};
}
@Test(dataProvider = "XMLAttribAccessPos")
public void testXMLAttribAccess(int sLine, int sCol, int eLine, int eCol, List<TypeDescKind> memKinds) {
TypeSymbol type = assertType(sLine, sCol, eLine, eCol, UNION);
UnionTypeSymbol union = (UnionTypeSymbol) type;
List<TypeSymbol> userSpecifiedMemberTypes = union.userSpecifiedMemberTypes();
for (int i = 0; i < userSpecifiedMemberTypes.size(); i++) {
TypeSymbol memType = userSpecifiedMemberTypes.get(i);
assertEquals(memType.typeKind(), memKinds.get(i));
}
}
@DataProvider(name = "XMLAttribAccessPos")
public Object[][] getXMLAttrib() {
return new Object[][]{
{188, 23, 188, 29, List.of(STRING, ERROR)},
{192, 10, 192, 19, List.of(STRING, ERROR)},
};
}
@Test
public void testAnnotAccess() {
UnionTypeSymbol type = (UnionTypeSymbol) assertType(198, 19, 198, 29, UNION);
assertEquals(type.userSpecifiedMemberTypes().get(0).typeKind(), TYPE_REFERENCE);
assertEquals(type.userSpecifiedMemberTypes().get(1).typeKind(), NIL);
assertType(198, 19, 198, 25, TYPEDESC);
}
@Test(dataProvider = "ErrorCtrPos")
public void testErrorConstructor(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "ErrorCtrPos")
public Object[][] getErrorCtrPos() {
return new Object[][]{
{203, 17, 203, 38, ERROR},
{203, 23, 203, 26, STRING},
{203, 32, 203, 37, STRING},
{204, 23, 204, 28, null},
{204, 34, 204, 38, ERROR},
{204, 44, 204, 46, INT},
};
}
@Test
public void testErrorCtr2() {
TypeSymbol type = assertType(204, 17, 204, 47, TYPE_REFERENCE);
assertEquals(((TypeReferenceTypeSymbol) type).typeDescriptor().typeKind(), ERROR);
}
@Test(dataProvider = "AnonFuncPos")
public void testAnonFuncs(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "AnonFuncPos")
public Object[][] getAnonFnPos() {
return new Object[][]{
{209, 16, 209, 68, FUNCTION},
{209, 46, 209, 68, STRING},
{211, 14, 213, 5, FUNCTION},
{211, 27, 213, 5, null},
{211, 51, 211, 52, INT},
{216, 42, 216, 61, FUNCTION},
{216, 43, 216, 44, null},
{216, 56, 216, 57, INT},
};
}
@Test(dataProvider = "LetExprPos")
public void testLetExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "LetExprPos")
public Object[][] getLetExprPos() {
return new Object[][]{
{220, 12, 220, 88, INT},
{220, 24, 220, 26, INT},
{220, 37, 220, 42, STRING},
{220, 70, 220, 78, STRING},
{220, 83, 220, 88, INT},
};
}
@Test(dataProvider = "TypeOfPos")
public void testTypeOfExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "TypeOfPos")
public Object[][] getTypeOfExprPos() {
return new Object[][]{
{225, 27, 225, 42, TYPEDESC},
{225, 34, 225, 42, INT},
};
}
@Test(dataProvider = "BitwiseExprPos")
public void testBitwiseExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "BitwiseExprPos")
public Object[][] getBitwiseExprPos() {
return new Object[][]{
{230, 14, 230, 15, INT},
{230, 14, 230, 20, INT},
{231, 10, 231, 18, INT},
{232, 10, 232, 19, INT},
};
}
@Test(dataProvider = "LogicalExprPos")
public void testLogicalExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "LogicalExprPos")
public Object[][] getLogicalExprPos() {
return new Object[][]{
{237, 18, 237, 30, BOOLEAN},
{237, 18, 237, 22, BOOLEAN},
{238, 10, 238, 27, BOOLEAN},
{238, 11, 238, 13, INT},
{241, 31, 241, 44, STRING},
{241, 31, 241, 35, UNION},
{241, 39, 241, 44, STRING},
{246, 14, 246, 25, INT},
{246, 20, 246, 25, BYTE},
{247, 10, 247, 17, INT},
{248, 10, 248, 21, BYTE},
{255, 13, 255, 21, OBJECT},
{256, 13, 256, 18, OBJECT},
{257, 13, 257, 18, OBJECT},
{266, 13, 266, 22, XML},
{267, 9, 267, 22, XML},
{272, 13, 272, 26, XML},
{273, 9, 273, 13, XML},
{274, 9, 274, 15, XML},
{275, 9, 275, 25, XML},
{276, 9, 276, 25, XML},
{276, 23, 276, 24, INT},
{277, 9, 277, 33, XML},
{282, 12, 282, 34, INT},
{282, 14, 282, 28, INT},
};
}
@Test
public void testTrapExpr() {
UnionTypeSymbol type = (UnionTypeSymbol) assertType(261, 20, 261, 33, UNION);
assertEquals(type.userSpecifiedMemberTypes().get(0).typeKind(), INT);
assertEquals(type.userSpecifiedMemberTypes().get(1).typeKind(), ERROR);
assertType(261, 25, 261, 33, INT);
}
private void assertType(int line, int sCol, int eCol, TypeDescKind kind) {
assertType(line, sCol, line, eCol, kind);
}
private TypeSymbol assertType(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
Optional<TypeSymbol> type = model.typeOf(
LineRange.from("expressions_test.bal", LinePosition.from(sLine, sCol), LinePosition.from(eLine, eCol)));
if (kind == null) {
assertTrue(type.isEmpty());
return null;
}
assertEquals(type.get().typeKind(), kind);
return type.get();
}
private TypeSymbol getExprType(int sLine, int sCol, int eLine, int eCol) {
LinePosition start = LinePosition.from(sLine, sCol);
LinePosition end = LinePosition.from(eLine, eCol);
Optional<TypeSymbol> typeSymbol = model.typeOf(LineRange.from("expressions_test.bal", start, end));
assertTrue(typeSymbol.isPresent());
return typeSymbol.get();
}
}
|
class ExpressionTypeTestNew {
private SemanticModel model;
@BeforeClass
public void setup() {
model = SemanticAPITestUtils.getDefaultModulesSemanticModel("test-src/expressions_test.bal");
}
@Test(dataProvider = "LiteralPosProvider")
public void testLiterals(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eLine, eCol, kind);
}
@DataProvider(name = "LiteralPosProvider")
public Object[][] getLiteralPos() {
return new Object[][]{
{19, 21, 19, 22, INT},
{19, 24, 19, 29, FLOAT},
{19, 31, 19, 36, DECIMAL},
{19, 38, 19, 42, BOOLEAN},
{19, 44, 19, 46, NIL},
{19, 48, 19, 53, STRING},
{20, 13, 20, 17, NIL},
};
}
@Test
public void testByteLiteral() {
TypeSymbol type = getExprType(21, 15, 21, 44);
assertEquals(type.typeKind(), ARRAY);
assertEquals(((ArrayTypeSymbol) type).memberTypeDescriptor().typeKind(), BYTE);
}
@Test
public void testStringTemplateExpr() {
assertType(25, 15, 25, 36, STRING);
}
@Test
public void testXMLTemplateExpr() {
TypeSymbol type = getExprType(26, 12, 26, 45);
assertEquals(type.typeKind(), TYPE_REFERENCE);
assertEquals(((TypeReferenceTypeSymbol) type).typeDescriptor().typeKind(), XML_ELEMENT);
}
@Test
public void testRawTemplate() {
TypeSymbol type = getExprType(27, 29, 27, 50);
assertEquals(type.typeKind(), TYPE_REFERENCE);
TypeSymbol objType = ((TypeReferenceTypeSymbol) type).typeDescriptor();
assertEquals(objType.typeKind(), OBJECT);
type = getExprType(25, 32, 25, 33);
assertEquals(type.typeKind(), STRING);
}
@Test
public void testArrayLiteral() {
TypeSymbol type = getExprType(31, 20, 31, 34);
assertEquals(type.typeKind(), ARRAY);
TypeSymbol memberType = ((ArrayTypeSymbol) type).memberTypeDescriptor();
assertEquals(memberType.typeKind(), STRING);
}
@Test(dataProvider = "RegexpTemplateLiteralPosProvider")
@DataProvider(name = "RegexpTemplateLiteralPosProvider")
private Object[][] getRegexpTemplateLiteralPos() {
return new Object[][] {
{362, 8, 362, 22},
{363, 8, 363, 13},
{364, 8, 364, 14},
{365, 8, 365, 20},
{366, 25, 366, 33},
{367, 25, 367, 36},
};
}
@Test(dataProvider = "TupleLiteralPosProvider")
public void testTupleLiteral(int sLine, int sCol, int eLine, int eCol, List<TypeDescKind> memberKinds) {
TypeSymbol type = getExprType(sLine, sCol, eLine, eCol);
assertEquals(type.typeKind(), TUPLE);
List<TypeSymbol> memberTypes = ((TupleTypeSymbol) type).memberTypeDescriptors();
assertEquals(memberTypes.size(), memberKinds.size());
for (int i = 0; i < memberTypes.size(); i++) {
TypeSymbol memberType = memberTypes.get(i);
assertEquals(memberType.typeKind(), memberKinds.get(i));
}
}
@DataProvider(name = "TupleLiteralPosProvider")
public Object[][] getTuplePos() {
return new Object[][]{
{32, 15, 32, 27, List.of(INT, INT, INT)},
{33, 31, 33, 49, List.of(INT, STRING, FLOAT)},
};
}
@Test
public void testMapLiteral() {
TypeSymbol type = getExprType(34, 20, 34, 34);
assertEquals(type.typeKind(), MAP);
TypeSymbol constraint = ((MapTypeSymbol) type).typeParam();
assertEquals(constraint.typeKind(), STRING);
assertType(34, 28, 34, 33, STRING);
}
@Test
public void testInferredMappingConstructorType() {
TypeSymbol type = getExprType(35, 13, 35, 43);
assertEquals(type.typeKind(), RECORD);
assertType(35, 14, 35, 20, STRING);
assertType(35, 22, 35, 31, STRING);
assertType(35, 33, 35, 39, STRING);
assertType(35, 41, 35, 42, INT);
}
@Test
public void testRecordLiteral() {
TypeSymbol type = getExprType(40, 16, 40, 43);
assertEquals(type.typeKind(), RECORD);
assertType(40, 17, 21, null);
assertType(40, 23, 40, 33, STRING);
assertType(40, 35, 38, null);
assertType(40, 40, 40, 42, INT);
}
@Test
public void testJSONObject() {
TypeSymbol type = getExprType(42, 13, 42, 40);
assertEquals(type.typeKind(), MAP);
TypeSymbol constraint = ((MapTypeSymbol) type).typeParam();
assertEquals(constraint.typeKind(), JSON);
assertType(42, 14, 18, null);
assertType(42, 20, 30, STRING);
assertType(42, 32, 35, null);
assertType(42, 37, 39, INT);
}
@Test(dataProvider = "AccessPosProvider")
public void testAccessExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eLine, eCol, kind);
}
@DataProvider(name = "AccessPosProvider")
public Object[][] getFieldAccessPos() {
return new Object[][]{
{51, 18, 51, 29, STRING},
{51, 18, 51, 24, RECORD},
{52, 15, 52, 26, UNION},
{52, 15, 52, 21, RECORD},
{53, 21, 53, 35, STRING},
{53, 21, 53, 27, RECORD},
{53, 28, 53, 34, STRING},
};
}
@Test(dataProvider = "TypeInitPosProvider")
public void testObjecTypeInit(int sLine, int sCol, int eLine, int eCol) {
TypeSymbol type = getExprType(sLine, sCol, eLine, eCol);
assertEquals(type.typeKind(), TYPE_REFERENCE);
assertEquals(type.getName().get(), "PersonObj");
assertEquals(((TypeReferenceTypeSymbol) type).typeDescriptor().typeKind(), OBJECT);
}
@DataProvider(name = "TypeInitPosProvider")
public Object[][] getTypeInitPos() {
return new Object[][]{
{57, 19, 57, 33},
{58, 19, 58, 42}
};
}
@Test
public void testArgsInNewExpr() {
assertType(57, 24, 57, 32, STRING);
assertType(58, 33, 58, 41, STRING);
}
@Test
public void testObjectConstructorExpr() {
assertType(64, 15, 68, 5, OBJECT);
assertType(65, 15, 65, 19, null);
assertType(65, 22, 65, 28, STRING);
assertType(67, 45, 67, 54, STRING);
assertType(67, 45, 67, 49, OBJECT);
assertType(67, 50, 67, 54, STRING);
}
@Test(dataProvider = "MiscExprPosProvider")
public void testMiscExprs(int sLine, int sCol, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eCol, kind);
}
@DataProvider(name = "MiscExprPosProvider")
public Object[][] getExprPos() {
return new Object[][]{
{72, 12, 15, INT},
{73, 12, 23, INT},
{73, 12, 19, INT},
{73, 17, 23, null},
{74, 12, 23, INT},
{75, 16, 22, BOOLEAN},
{76, 17, 22, STRING},
{78, 8, 20, BOOLEAN},
{78, 8, 10, ANYDATA},
{78, 14, 20, null},
};
}
@Test(dataProvider = "CheckingExprPosProvider")
public void testCheckingExprs(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eLine, eCol, kind);
}
@DataProvider(name = "CheckingExprPosProvider")
public Object[][] getCheckingExprPos() {
return new Object[][]{
{86, 16, 86, 27, STRING},
{86, 22, 86, 27, UNION},
{87, 16, 87, 32, STRING},
{87, 27, 87, 32, UNION},
};
}
@Test(dataProvider = "CastingExprPosProvider")
public void testCastingExprs(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eLine, eCol, kind);
}
@DataProvider(name = "CastingExprPosProvider")
public Object[][] getCastingExprPos() {
return new Object[][]{
{92, 15, 92, 25, STRING},
{92, 23, 92, 25, ANYDATA},
{93, 12, 93, 30, INT},
{93, 28, 93, 30, ANYDATA},
};
}
@Test
public void testInferredRecordTypeForInvalidExprs() {
assertType(97, 4, 97, 20, RECORD);
}
@Test
public void testStartAction() {
TypeSymbol type = getExprType(101, 4, 101, 21);
assertEquals(type.typeKind(), FUTURE);
assertEquals(((FutureTypeSymbol) type).typeParameter().get().typeKind(), NIL);
assertType(101, 10, 101, 21, NIL);
}
@Test
public void testFutureResultType() {
TypeSymbol type = getExprType(350, 31, 350, 38);
assertEquals(type.typeKind(), FUTURE);
Optional<TypeSymbol> typeParameter = ((FutureTypeSymbol) type).typeParameter();
assertTrue(typeParameter.isPresent());
assertEquals(typeParameter.get().typeKind(), INT);
assertType(354, 17, 354, 24, INT);
}
@Test(dataProvider = "CallExprPosProvider")
public void testFunctionCall(int sLine, int sCol, int eCol, TypeDescKind kind) {
assertType(sLine, sCol, eCol, kind);
}
@DataProvider(name = "CallExprPosProvider")
public Object[][] getCallExprPos() {
return new Object[][]{
{109, 4, 10, null},
{109, 4, 9, UNION},
{112, 15, 16, TYPE_REFERENCE},
{112, 15, 27, null},
{112, 15, 26, STRING},
{127, 4, 35, null},
{127, 4, 34, STRING},
{129, 12, 36, INT},
{129, 12, 37, null},
{130, 4, 35, BOOLEAN}
};
}
@Test
public void testExpressionsOfIntersectionTypes() {
assertType(135, 4, 21, INTERSECTION);
assertType(135, 4, 22, null);
assertType(137, 4, 24, null);
assertType(137, 4, 23, INTERSECTION);
assertType(139, 4, 26, UNION);
TypeSymbol t1 = getExprType(139, 4, 139, 26);
assertEquals(t1.typeKind(), UNION);
assertEquals(t1.signature(), "(Foo & readonly)|int|(string[] & readonly)");
assertType(141, 4, 27, null);
TypeSymbol t2 = getExprType(141, 4, 141, 26);
assertEquals(t2.typeKind(), UNION);
assertEquals(t2.signature(), "(int[] & readonly)?");
assertType(143, 4, 27, null);
TypeSymbol t3 = getExprType(143, 4, 143, 26);
assertEquals(t3.typeKind(), UNION);
assertEquals(t3.signature(), "(int[] & readonly)?");
}
@Test
public void testTypeWithinServiceDecl() {
assertType(118, 15, 118, 16, RECORD);
}
@Test
public void testTypeWithinDoAndOnFailClause() {
TypeSymbol exprType = getExprType(164, 16, 164, 23);
assertEquals(exprType.typeKind(), TYPE_REFERENCE);
assertEquals(exprType.getName().get(), "Foo");
exprType = getExprType(166, 12, 166, 42);
assertEquals(exprType.typeKind(), STRING);
}
@Test
public void testFuncCallForDependentlyTypedSignatures() {
TypeSymbol exprType = getExprType(172, 12, 172, 35);
assertEquals(exprType.typeKind(), INT);
}
@Test
public void testTypeOfExprInErroredStmt() {
TypeSymbol type = getExprType(177, 12, 177, 23);
assertEquals(type.typeKind(), UNION);
UnionTypeSymbol union = (UnionTypeSymbol) type;
assertEquals(union.memberTypeDescriptors().get(0).typeKind(), INT);
assertEquals(union.memberTypeDescriptors().get(1).typeKind(), ERROR);
}
@Test(dataProvider = "TableCtrPosProvider")
public void testTableConstructor(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "TableCtrPosProvider")
public Object[][] getTableCtrPos() {
return new Object[][]{
{181, 33, 183, 5, TABLE},
{181, 43, 181, 45, null},
{182, 25, 182, 35, STRING},
{181, 10, 181, 16, null},
};
}
@Test(dataProvider = "XMLAttribAccessPos")
public void testXMLAttribAccess(int sLine, int sCol, int eLine, int eCol, List<TypeDescKind> memKinds) {
TypeSymbol type = assertType(sLine, sCol, eLine, eCol, UNION);
UnionTypeSymbol union = (UnionTypeSymbol) type;
List<TypeSymbol> userSpecifiedMemberTypes = union.userSpecifiedMemberTypes();
for (int i = 0; i < userSpecifiedMemberTypes.size(); i++) {
TypeSymbol memType = userSpecifiedMemberTypes.get(i);
assertEquals(memType.typeKind(), memKinds.get(i));
}
}
@DataProvider(name = "XMLAttribAccessPos")
public Object[][] getXMLAttrib() {
return new Object[][]{
{188, 23, 188, 29, List.of(STRING, ERROR)},
{192, 10, 192, 19, List.of(STRING, ERROR)},
};
}
@Test
public void testAnnotAccess() {
UnionTypeSymbol type = (UnionTypeSymbol) assertType(198, 19, 198, 29, UNION);
assertEquals(type.userSpecifiedMemberTypes().get(0).typeKind(), TYPE_REFERENCE);
assertEquals(type.userSpecifiedMemberTypes().get(1).typeKind(), NIL);
assertType(198, 19, 198, 25, TYPEDESC);
}
@Test(dataProvider = "ErrorCtrPos")
public void testErrorConstructor(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "ErrorCtrPos")
public Object[][] getErrorCtrPos() {
return new Object[][]{
{203, 17, 203, 38, ERROR},
{203, 23, 203, 26, STRING},
{203, 32, 203, 37, STRING},
{204, 23, 204, 28, null},
{204, 34, 204, 38, ERROR},
{204, 44, 204, 46, INT},
};
}
@Test
public void testErrorCtr2() {
TypeSymbol type = assertType(204, 17, 204, 47, TYPE_REFERENCE);
assertEquals(((TypeReferenceTypeSymbol) type).typeDescriptor().typeKind(), ERROR);
}
@Test(dataProvider = "AnonFuncPos")
public void testAnonFuncs(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "AnonFuncPos")
public Object[][] getAnonFnPos() {
return new Object[][]{
{209, 16, 209, 68, FUNCTION},
{209, 46, 209, 68, STRING},
{211, 14, 213, 5, FUNCTION},
{211, 27, 213, 5, null},
{211, 51, 211, 52, INT},
{216, 42, 216, 61, FUNCTION},
{216, 43, 216, 44, null},
{216, 56, 216, 57, INT},
};
}
@Test(dataProvider = "LetExprPos")
public void testLetExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "LetExprPos")
public Object[][] getLetExprPos() {
return new Object[][]{
{220, 12, 220, 88, INT},
{220, 24, 220, 26, INT},
{220, 37, 220, 42, STRING},
{220, 70, 220, 78, STRING},
{220, 83, 220, 88, INT},
};
}
@Test(dataProvider = "TypeOfPos")
public void testTypeOfExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "TypeOfPos")
public Object[][] getTypeOfExprPos() {
return new Object[][]{
{225, 27, 225, 42, TYPEDESC},
{225, 34, 225, 42, INT},
};
}
@Test(dataProvider = "BitwiseExprPos")
public void testBitwiseExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "BitwiseExprPos")
public Object[][] getBitwiseExprPos() {
return new Object[][]{
{230, 14, 230, 15, INT},
{230, 14, 230, 20, INT},
{231, 10, 231, 18, INT},
{232, 10, 232, 19, INT},
};
}
@Test(dataProvider = "LogicalExprPos")
public void testLogicalExpr(int sLine, int sCol, int eLine, int eCol, TypeDescKind expKind) {
assertType(sLine, sCol, eLine, eCol, expKind);
}
@DataProvider(name = "LogicalExprPos")
public Object[][] getLogicalExprPos() {
return new Object[][]{
{237, 18, 237, 30, BOOLEAN},
{237, 18, 237, 22, BOOLEAN},
{238, 10, 238, 27, BOOLEAN},
{238, 11, 238, 13, INT},
{241, 31, 241, 44, STRING},
{241, 31, 241, 35, UNION},
{241, 39, 241, 44, STRING},
{246, 14, 246, 25, INT},
{246, 20, 246, 25, BYTE},
{247, 10, 247, 17, INT},
{248, 10, 248, 21, BYTE},
{255, 13, 255, 21, OBJECT},
{256, 13, 256, 18, OBJECT},
{257, 13, 257, 18, OBJECT},
{266, 13, 266, 22, XML},
{267, 9, 267, 22, XML},
{272, 13, 272, 26, XML},
{273, 9, 273, 13, XML},
{274, 9, 274, 15, XML},
{275, 9, 275, 25, XML},
{276, 9, 276, 25, XML},
{276, 23, 276, 24, INT},
{277, 9, 277, 33, XML},
{282, 12, 282, 34, INT},
{282, 14, 282, 28, INT},
};
}
@Test
public void testTrapExpr() {
UnionTypeSymbol type = (UnionTypeSymbol) assertType(261, 20, 261, 33, UNION);
assertEquals(type.userSpecifiedMemberTypes().get(0).typeKind(), INT);
assertEquals(type.userSpecifiedMemberTypes().get(1).typeKind(), ERROR);
assertType(261, 25, 261, 33, INT);
}
private void assertType(int line, int sCol, int eCol, TypeDescKind kind) {
assertType(line, sCol, line, eCol, kind);
}
private TypeSymbol assertType(int sLine, int sCol, int eLine, int eCol, TypeDescKind kind) {
Optional<TypeSymbol> type = model.typeOf(
LineRange.from("expressions_test.bal", LinePosition.from(sLine, sCol), LinePosition.from(eLine, eCol)));
if (kind == null) {
assertTrue(type.isEmpty());
return null;
}
assertTrue(type.isPresent());
assertEquals(type.get().typeKind(), kind);
return type.get();
}
private TypeSymbol getExprType(int sLine, int sCol, int eLine, int eCol) {
LinePosition start = LinePosition.from(sLine, sCol);
LinePosition end = LinePosition.from(eLine, eCol);
Optional<TypeSymbol> typeSymbol = model.typeOf(LineRange.from("expressions_test.bal", start, end));
assertTrue(typeSymbol.isPresent());
return typeSymbol.get();
}
}
|
Ah, I thought max_deferred_task_version_wait_time_sec was an integral type.
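For context, a minimal sketch (with a made-up value, not the real config) of why a fractional-seconds setting has to be scaled to milliseconds before truncation, rather than truncated to whole seconds first:

```java
import java.time.Duration;

public class DurationConversionSketch {
    public static void main(String[] args) {
        double waitTimeSec = 2.5; // hypothetical fractional-seconds config value

        // Truncating to whole seconds first drops the fraction: PT2S
        Duration truncatedFirst = Duration.ofSeconds((long) waitTimeSec);

        // Scaling to millis before truncating keeps it: PT2.5S
        Duration scaledFirst = Duration.ofMillis((long) (waitTimeSec * 1000));

        System.out.println(truncatedFirst + " vs " + scaledFirst);
    }
}
```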
|
private void configure(FleetcontrollerConfig config) {
options.clusterName = config.cluster_name();
options.fleetControllerIndex = config.index();
options.fleetControllerCount = config.fleet_controller_count();
options.zooKeeperSessionTimeout = (int) (config.zookeeper_session_timeout() * 1000);
options.masterZooKeeperCooldownPeriod = (int) (config.master_zookeeper_cooldown_period() * 1000);
options.stateGatherCount = config.state_gather_count();
options.rpcPort = config.rpc_port();
options.httpPort = config.http_port();
options.maxTransitionTime.put(NodeType.STORAGE, config.storage_transition_time());
options.maxTransitionTime.put(NodeType.DISTRIBUTOR, config.distributor_transition_time());
options.maxInitProgressTime = config.init_progress_time();
options.statePollingFrequency = config.state_polling_frequency();
options.maxPrematureCrashes = config.max_premature_crashes();
options.stableStateTimePeriod = config.stable_state_time_period();
options.eventLogMaxSize = config.event_log_max_size();
options.eventNodeLogMaxSize = config.event_node_log_max_size();
options.minDistributorNodesUp = config.min_distributors_up_count();
options.minStorageNodesUp = config.min_storage_up_count();
options.minRatioOfDistributorNodesUp = config.min_distributor_up_ratio();
options.minRatioOfStorageNodesUp = config.min_storage_up_ratio();
options.cycleWaitTime = (int) (config.cycle_wait_time() * 1000);
options.minTimeBeforeFirstSystemStateBroadcast = (int) (config.min_time_before_first_system_state_broadcast() * 1000);
options.nodeStateRequestTimeoutMS = (int) (config.get_node_state_request_timeout() * 1000);
options.showLocalSystemStatesInEventLog = config.show_local_systemstates_in_event_log();
options.minTimeBetweenNewSystemStates = config.min_time_between_new_systemstates();
options.maxSlobrokDisconnectGracePeriod = (int) (config.max_slobrok_disconnect_grace_period() * 1000);
options.distributionBits = config.ideal_distribution_bits();
options.minNodeRatioPerGroup = config.min_node_ratio_per_group();
options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000)));
}
|
options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000)));
|
private void configure(FleetcontrollerConfig config) {
options.clusterName = config.cluster_name();
options.fleetControllerIndex = config.index();
options.fleetControllerCount = config.fleet_controller_count();
options.zooKeeperSessionTimeout = (int) (config.zookeeper_session_timeout() * 1000);
options.masterZooKeeperCooldownPeriod = (int) (config.master_zookeeper_cooldown_period() * 1000);
options.stateGatherCount = config.state_gather_count();
options.rpcPort = config.rpc_port();
options.httpPort = config.http_port();
options.maxTransitionTime.put(NodeType.STORAGE, config.storage_transition_time());
options.maxTransitionTime.put(NodeType.DISTRIBUTOR, config.distributor_transition_time());
options.maxInitProgressTime = config.init_progress_time();
options.statePollingFrequency = config.state_polling_frequency();
options.maxPrematureCrashes = config.max_premature_crashes();
options.stableStateTimePeriod = config.stable_state_time_period();
options.eventLogMaxSize = config.event_log_max_size();
options.eventNodeLogMaxSize = config.event_node_log_max_size();
options.minDistributorNodesUp = config.min_distributors_up_count();
options.minStorageNodesUp = config.min_storage_up_count();
options.minRatioOfDistributorNodesUp = config.min_distributor_up_ratio();
options.minRatioOfStorageNodesUp = config.min_storage_up_ratio();
options.cycleWaitTime = (int) (config.cycle_wait_time() * 1000);
options.minTimeBeforeFirstSystemStateBroadcast = (int) (config.min_time_before_first_system_state_broadcast() * 1000);
options.nodeStateRequestTimeoutMS = (int) (config.get_node_state_request_timeout() * 1000);
options.showLocalSystemStatesInEventLog = config.show_local_systemstates_in_event_log();
options.minTimeBetweenNewSystemStates = config.min_time_between_new_systemstates();
options.maxSlobrokDisconnectGracePeriod = (int) (config.max_slobrok_disconnect_grace_period() * 1000);
options.distributionBits = config.ideal_distribution_bits();
options.minNodeRatioPerGroup = config.min_node_ratio_per_group();
options.setMaxDeferredTaskVersionWaitTime(Duration.ofMillis((int)(config.max_deferred_task_version_wait_time_sec() * 1000)));
}
|
class ClusterControllerClusterConfigurer {
private final FleetControllerOptions options = new FleetControllerOptions(null);
public ClusterControllerClusterConfigurer(ClusterController controller,
StorDistributionConfig distributionConfig,
FleetcontrollerConfig fleetcontrollerConfig,
SlobroksConfig slobroksConfig,
ZookeepersConfig zookeepersConfig,
Metric metricImpl) throws Exception
{
configure(distributionConfig);
configure(fleetcontrollerConfig);
configure(slobroksConfig);
configure(zookeepersConfig);
checkIfZooKeeperNeeded();
if (controller != null) {
controller.setOptions(options.clusterName, options, metricImpl);
}
}
public FleetControllerOptions getOptions() { return options; }
private void configure(StorDistributionConfig config) {
options.setStorageDistribution(new Distribution(config));
}
private void configure(SlobroksConfig config) {
String[] specs = new String[config.slobrok().size()];
for (int i = 0; i < config.slobrok().size(); i++) {
specs[i] = config.slobrok().get(i).connectionspec();
}
options.slobrokConnectionSpecs = specs;
}
private void configure(ZookeepersConfig config) {
options.zooKeeperServerAddress = config.zookeeperserverlist();
}
private void checkIfZooKeeperNeeded() {
if (options.zooKeeperServerAddress == null || "".equals(options.zooKeeperServerAddress)) {
if (options.fleetControllerCount > 1) {
throw new IllegalArgumentException(
"Must set zookeeper server with multiple fleetcontrollers");
} else {
options.zooKeeperServerAddress = null;
}
}
}
}
|
class ClusterControllerClusterConfigurer {
private final FleetControllerOptions options = new FleetControllerOptions(null);
public ClusterControllerClusterConfigurer(ClusterController controller,
StorDistributionConfig distributionConfig,
FleetcontrollerConfig fleetcontrollerConfig,
SlobroksConfig slobroksConfig,
ZookeepersConfig zookeepersConfig,
Metric metricImpl) throws Exception
{
configure(distributionConfig);
configure(fleetcontrollerConfig);
configure(slobroksConfig);
configure(zookeepersConfig);
checkIfZooKeeperNeeded();
if (controller != null) {
controller.setOptions(options.clusterName, options, metricImpl);
}
}
public FleetControllerOptions getOptions() { return options; }
private void configure(StorDistributionConfig config) {
options.setStorageDistribution(new Distribution(config));
}
private void configure(SlobroksConfig config) {
String[] specs = new String[config.slobrok().size()];
for (int i = 0; i < config.slobrok().size(); i++) {
specs[i] = config.slobrok().get(i).connectionspec();
}
options.slobrokConnectionSpecs = specs;
}
private void configure(ZookeepersConfig config) {
options.zooKeeperServerAddress = config.zookeeperserverlist();
}
private void checkIfZooKeeperNeeded() {
if (options.zooKeeperServerAddress == null || "".equals(options.zooKeeperServerAddress)) {
if (options.fleetControllerCount > 1) {
throw new IllegalArgumentException(
"Must set zookeeper server with multiple fleetcontrollers");
} else {
options.zooKeeperServerAddress = null;
}
}
}
}
|
Can we also note that this is to align with the non-interactive implementation?
|
public void execute() {
PrintStream out = System.out;
Path projectPath = Paths.get(System.getProperty(USER_DIR));
Scanner scanner = new Scanner(System.in, Charset.defaultCharset().name());
try {
Manifest manifest = null;
if (helpFlag) {
String commandUsageInfo = BLauncherCmd.getCommandUsageInfo(INIT_COMMAND);
outStream.println(commandUsageInfo);
return;
}
List<SrcFile> sourceFiles = new ArrayList<>();
List<PackageMdFile> packageMdFiles = new ArrayList<>();
if (interactiveFlag) {
out.print("Create Ballerina.toml [yes/y, no/n]: (y) ");
String createToml = scanner.nextLine().trim();
if (createToml.equalsIgnoreCase("yes") || createToml.equalsIgnoreCase("y") || createToml.isEmpty()) {
manifest = new Manifest();
String defaultOrg = guessOrgName();
out.print("Organization name: (" + defaultOrg + ") ");
String orgName = scanner.nextLine().trim();
manifest.setName(orgName.isEmpty() ? defaultOrg : orgName);
String version;
do {
out.print("Version: (" + DEFAULT_VERSION + ") ");
version = scanner.nextLine().trim();
version = version.isEmpty() ? DEFAULT_VERSION : version;
} while (!validateVersion(out, version));
manifest.setVersion(version);
}
String srcInput;
boolean validInput = false;
boolean firstPrompt = true;
do {
if (firstPrompt) {
out.print("Ballerina source [service/s, main/m, finish/f]: (s) ");
} else {
out.print("Ballerina source [service/s, main/m, finish/f]: (f) ");
}
srcInput = scanner.nextLine().trim();
if (srcInput.equalsIgnoreCase("service") || srcInput.equalsIgnoreCase("s")
|| (srcInput.isEmpty() && firstPrompt)) {
String packageName;
do {
out.print("Package for the service: (no package) ");
packageName = scanner.nextLine().trim();
} while (!validatePkgName(out, packageName));
SrcFile srcFile = new SrcFile(packageName, FileType.SERVICE);
sourceFiles.add(srcFile);
SrcFile srcTestFile = new SrcFile(packageName, FileType.SERVICE_TEST);
sourceFiles.add(srcTestFile);
if (!packageName.isEmpty()) {
PackageMdFile packageMdFile = new PackageMdFile(packageName, FileType.SERVICE);
packageMdFiles.add(packageMdFile);
}
firstPrompt = false;
} else if (srcInput.equalsIgnoreCase("main") || srcInput.equalsIgnoreCase("m")) {
String packageName;
do {
out.print("Package for the main: (no package) ");
packageName = scanner.nextLine().trim();
} while (!validatePkgName(out, packageName));
SrcFile srcFile = new SrcFile(packageName, FileType.MAIN);
sourceFiles.add(srcFile);
SrcFile srcTestFile = new SrcFile(packageName, FileType.MAIN_TEST);
sourceFiles.add(srcTestFile);
if (!packageName.isEmpty()) {
PackageMdFile packageMdFile = new PackageMdFile(packageName, FileType.MAIN);
packageMdFiles.add(packageMdFile);
}
firstPrompt = false;
} else if (srcInput.isEmpty() || srcInput.equalsIgnoreCase("f") ||
srcInput.equalsIgnoreCase("finish")) {
validInput = true;
firstPrompt = false;
} else {
out.println("Invalid input");
}
} while (!validInput);
out.print("\n");
} else {
manifest = new Manifest();
manifest.setName(guessOrgName());
manifest.setVersion(DEFAULT_VERSION);
if (isDirEmpty(projectPath)) {
SrcFile srcFile = new SrcFile("", FileType.SERVICE);
sourceFiles.add(srcFile);
}
}
InitHandler.initialize(projectPath, manifest, sourceFiles, packageMdFiles);
out.println("Ballerina project initialized");
} catch (IOException e) {
out.println("Error occurred while creating project: " + e.getMessage());
}
}
|
public void execute() {
PrintStream out = System.out;
Path projectPath = Paths.get(System.getProperty(USER_DIR));
Scanner scanner = new Scanner(System.in, Charset.defaultCharset().name());
try {
Manifest manifest = null;
if (helpFlag) {
String commandUsageInfo = BLauncherCmd.getCommandUsageInfo(INIT_COMMAND);
outStream.println(commandUsageInfo);
return;
}
List<SrcFile> sourceFiles = new ArrayList<>();
List<PackageMdFile> packageMdFiles = new ArrayList<>();
if (interactiveFlag) {
out.print("Create Ballerina.toml [yes/y, no/n]: (y) ");
String createToml = scanner.nextLine().trim();
if (createToml.equalsIgnoreCase("yes") || createToml.equalsIgnoreCase("y") || createToml.isEmpty()) {
manifest = new Manifest();
String defaultOrg = guessOrgName();
out.print("Organization name: (" + defaultOrg + ") ");
String orgName = scanner.nextLine().trim();
manifest.setName(orgName.isEmpty() ? defaultOrg : orgName);
String version;
do {
out.print("Version: (" + DEFAULT_VERSION + ") ");
version = scanner.nextLine().trim();
version = version.isEmpty() ? DEFAULT_VERSION : version;
} while (!validateVersion(out, version));
manifest.setVersion(version);
}
String srcInput;
boolean validInput = false;
boolean firstPrompt = true;
do {
if (firstPrompt) {
out.print("Ballerina source [service/s, main/m, finish/f]: (s) ");
} else {
out.print("Ballerina source [service/s, main/m, finish/f]: (f) ");
}
srcInput = scanner.nextLine().trim();
if (srcInput.equalsIgnoreCase("service") || srcInput.equalsIgnoreCase("s")
|| (srcInput.isEmpty() && firstPrompt)) {
String packageName;
do {
out.print("Package for the service: (no package) ");
packageName = scanner.nextLine().trim();
} while (!validatePkgName(out, packageName));
SrcFile srcFile = new SrcFile(packageName, FileType.SERVICE);
sourceFiles.add(srcFile);
SrcFile srcTestFile = new SrcFile(packageName, FileType.SERVICE_TEST);
sourceFiles.add(srcTestFile);
if (!packageName.isEmpty()) {
PackageMdFile packageMdFile = new PackageMdFile(packageName, FileType.SERVICE);
packageMdFiles.add(packageMdFile);
}
firstPrompt = false;
} else if (srcInput.equalsIgnoreCase("main") || srcInput.equalsIgnoreCase("m")) {
String packageName;
do {
out.print("Package for the main: (no package) ");
packageName = scanner.nextLine().trim();
} while (!validatePkgName(out, packageName));
SrcFile srcFile = new SrcFile(packageName, FileType.MAIN);
sourceFiles.add(srcFile);
SrcFile srcTestFile = new SrcFile(packageName, FileType.MAIN_TEST);
sourceFiles.add(srcTestFile);
if (!packageName.isEmpty()) {
PackageMdFile packageMdFile = new PackageMdFile(packageName, FileType.MAIN);
packageMdFiles.add(packageMdFile);
}
firstPrompt = false;
} else if (srcInput.isEmpty() || srcInput.equalsIgnoreCase("f") ||
srcInput.equalsIgnoreCase("finish")) {
validInput = true;
firstPrompt = false;
} else {
out.println("Invalid input");
}
} while (!validInput);
out.print("\n");
} else {
manifest = new Manifest();
manifest.setName(guessOrgName());
manifest.setVersion(DEFAULT_VERSION);
if (isDirEmpty(projectPath)) {
SrcFile srcFile = new SrcFile("", FileType.SERVICE);
sourceFiles.add(srcFile);
}
}
InitHandler.initialize(projectPath, manifest, sourceFiles, packageMdFiles);
out.println("Ballerina project initialized");
} catch (IOException e) {
out.println("Error occurred while creating project: " + e.getMessage());
}
}
|
class InitCommand implements BLauncherCmd {
public static final String DEFAULT_VERSION = "0.0.1";
private static final String USER_DIR = "user.dir";
private static final PrintStream outStream = System.err;
@CommandLine.Option(names = {"--interactive", "-i"})
private boolean interactiveFlag;
@CommandLine.Option(names = {"--help", "-h"}, hidden = true)
private boolean helpFlag;
private static boolean isDirEmpty(final Path directory) throws IOException {
try (DirectoryStream<Path> dirStream = Files.newDirectoryStream(directory)) {
return !dirStream.iterator().hasNext();
}
}
/**
* {@inheritDoc}
*/
@Override
public String getName() {
return INIT_COMMAND;
}
/**
* {@inheritDoc}
*/
@Override
public void printLongDesc(StringBuilder out) {
out.append("Initializes a Ballerina Project. \n");
out.append("\n");
out.append("Use --interactive or -i to create a ballerina project in interactive mode.\n");
}
/**
* {@inheritDoc}
*/
@Override
public void printUsage(StringBuilder out) {
out.append(" ballerina init [-i] \n");
}
/**
* {@inheritDoc}
*/
@Override
public void setParentCmdParser(CommandLine parentCmdParser) {
}
/**
* {@inheritDoc}
*/
@Override
public void setSelfCmdParser(CommandLine selfCmdParser) {
}
/**
* Validates the version is a semver version.
*
* @param versionAsString The version.
* @return True if valid version, else false.
*/
private boolean validateVersion(PrintStream out, String versionAsString) {
String semverRegex = "((?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*))";
boolean matches = Pattern.matches(semverRegex, versionAsString);
if (!matches) {
out.println("--Invalid version: \"" + versionAsString + "\"");
}
return matches;
}
private String guessOrgName() {
String guessOrgName = System.getProperty("user.name");
if (guessOrgName == null) {
guessOrgName = "my_org";
} else {
guessOrgName = guessOrgName.toLowerCase(Locale.getDefault());
}
return guessOrgName;
}
/**
* Validates the package name.
*
* @param pkgName The package name.
* @return True if valid package name, else false.
*/
private boolean validatePkgName(PrintStream out, String pkgName) {
if (pkgName.isEmpty()) {
return true;
}
String validRegex = "^[a-zA-Z0-9_.]*$";
boolean matches = Pattern.matches(validRegex, pkgName);
if (!matches) {
out.println("--Invalid package name: \"" + pkgName + "\"." + " Package name can only contain " +
"alphanumeric, underscore and DOT");
}
return matches;
}
}
|
class InitCommand implements BLauncherCmd {
public static final String DEFAULT_VERSION = "0.0.1";
private static final String USER_DIR = "user.dir";
private static final PrintStream outStream = System.err;
@CommandLine.Option(names = {"--interactive", "-i"})
private boolean interactiveFlag;
@CommandLine.Option(names = {"--help", "-h"}, hidden = true)
private boolean helpFlag;
private static boolean isDirEmpty(final Path directory) throws IOException {
try (DirectoryStream<Path> dirStream = Files.newDirectoryStream(directory)) {
return !dirStream.iterator().hasNext();
}
}
/**
* {@inheritDoc}
*/
@Override
public String getName() {
return INIT_COMMAND;
}
/**
* {@inheritDoc}
*/
@Override
public void printLongDesc(StringBuilder out) {
out.append("Initializes a Ballerina Project. \n");
out.append("\n");
out.append("Use --interactive or -i to create a ballerina project in interactive mode.\n");
}
/**
* {@inheritDoc}
*/
@Override
public void printUsage(StringBuilder out) {
out.append(" ballerina init [-i] \n");
}
/**
* {@inheritDoc}
*/
@Override
public void setParentCmdParser(CommandLine parentCmdParser) {
}
/**
* {@inheritDoc}
*/
@Override
public void setSelfCmdParser(CommandLine selfCmdParser) {
}
/**
* Validates the version is a semver version.
*
* @param versionAsString The version.
* @return True if valid version, else false.
*/
private boolean validateVersion(PrintStream out, String versionAsString) {
String semverRegex = "((?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*))";
boolean matches = Pattern.matches(semverRegex, versionAsString);
if (!matches) {
out.println("--Invalid version: \"" + versionAsString + "\"");
}
return matches;
}
private String guessOrgName() {
String guessOrgName = System.getProperty("user.name");
if (guessOrgName == null) {
guessOrgName = "my_org";
} else {
guessOrgName = guessOrgName.toLowerCase(Locale.getDefault());
}
return guessOrgName;
}
/**
* Validates the package name.
*
* @param pkgName The package name.
* @return True if valid package name, else false.
*/
private boolean validatePkgName(PrintStream out, String pkgName) {
if (pkgName.isEmpty()) {
return true;
}
String validRegex = "^[a-zA-Z0-9_.]*$";
boolean matches = Pattern.matches(validRegex, pkgName);
if (!matches) {
out.println("--Invalid package name: \"" + pkgName + "\"." + " Package name can only contain " +
"alphanumeric, underscore and DOT");
}
return matches;
}
}
|
|
The ```TaskExecutorStateChangelogStoragesManager``` behaves like ```TaskExecutorLocalStateStoresManager```. It relies on ```releaseStateChangelogStorageForJob``` for cleanup, which is triggered in ```TaskExecutor::closeJobManagerConnectionIfNoAllocatedResources``` when the last slot of a job is released.
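To make that lifecycle concrete, here is a small self-contained sketch of the pattern (names are illustrative, not Flink's actual API): per-job resources are released explicitly when the job's last slot goes away, and anything left over is released in bulk by the shutdown hook.

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative per-job resource registry mirroring the manager's contract:
// register on first use, release per job, release everything on shutdown.
class PerJobResourceRegistry<R extends AutoCloseable> {
    private final Map<String, R> resourcesByJobId = new HashMap<>();
    private boolean closed = false;

    synchronized void register(String jobId, R resource) {
        if (closed) {
            throw new IllegalStateException("already shut down");
        }
        resourcesByJobId.put(jobId, resource);
    }

    // Mirrors releaseStateChangelogStorageForJob: called when the last slot
    // of the job is released on this task executor.
    synchronized void releaseForJob(String jobId) {
        R resource = resourcesByJobId.remove(jobId);
        if (resource != null) {
            closeQuietly(resource);
        }
    }

    // Mirrors shutdown(): releases whatever was never released per job.
    synchronized void shutdown() {
        if (closed) {
            return;
        }
        closed = true;
        resourcesByJobId.values().forEach(this::closeQuietly);
        resourcesByJobId.clear();
    }

    private void closeQuietly(R resource) {
        try {
            resource.close();
        } catch (Exception e) {
            // Swallow and continue, as doRelease does below.
        }
    }
}
```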
|
public TaskExecutorStateChangelogStoragesManager() {
this.changelogStoragesByJobId = new HashMap<>();
this.lock = new Object();
this.closed = false;
this.shutdownHook =
ShutdownHookUtil.addShutdownHook(this::shutdown, getClass().getSimpleName(), LOG);
}
|
ShutdownHookUtil.addShutdownHook(this::shutdown, getClass().getSimpleName(), LOG);
|
public TaskExecutorStateChangelogStoragesManager() {
this.changelogStoragesByJobId = new HashMap<>();
this.closed = false;
this.shutdownHook =
ShutdownHookUtil.addShutdownHook(this::shutdown, getClass().getSimpleName(), LOG);
}
|
class TaskExecutorStateChangelogStoragesManager {
/** Logger for this class. */
private static final Logger LOG =
LoggerFactory.getLogger(TaskExecutorStateChangelogStoragesManager.class);
/**
* This map holds all state changelog storages for tasks running on the task manager / executor
* that owns this instance. Maps from job id to all the subtasks' state changelog
* storages.
*/
@GuardedBy("lock")
private final Map<JobID, StateChangelogStorage<?>> changelogStoragesByJobId;
/** Guarding lock for changelogStoragesByJobId and closed-flag. */
private final Object lock;
@GuardedBy("lock")
private boolean closed;
/** shutdown hook for this manager. */
private final Thread shutdownHook;
public StateChangelogStorage<?> stateChangelogStorageForJob(
@Nonnull JobID jobId, Configuration configuration) {
synchronized (lock) {
if (closed) {
throw new IllegalStateException(
"TaskExecutorStateChangelogStoragesManager is already closed and cannot "
+ "register a new StateChangelogStorage.");
}
StateChangelogStorage<?> stateChangelogStorage = changelogStoragesByJobId.get(jobId);
if (stateChangelogStorage == null) {
stateChangelogStorage = StateChangelogStorageLoader.load(configuration);
changelogStoragesByJobId.put(jobId, stateChangelogStorage);
if (stateChangelogStorage != null) {
LOG.debug(
"Registered new state changelog storage for job {} : {}.",
jobId,
stateChangelogStorage);
} else {
LOG.info(
"Try to registered new state changelog storage for job {},"
+ "but result is null.",
jobId);
}
} else {
LOG.debug(
"Found existing state changelog storage for job {}: {}.",
jobId,
stateChangelogStorage);
}
return stateChangelogStorage;
}
}
public void releaseStateChangelogStorageForJob(@Nonnull JobID jobId) {
LOG.debug("Releasing state changelog storage under job id {}.", jobId);
StateChangelogStorage<?> cleanupChangelogStorage;
synchronized (lock) {
if (closed) {
return;
}
cleanupChangelogStorage = changelogStoragesByJobId.remove(jobId);
}
if (cleanupChangelogStorage != null) {
doRelease(cleanupChangelogStorage);
}
}
public void shutdown() {
HashMap<JobID, StateChangelogStorage<?>> toRelease;
synchronized (lock) {
if (closed) {
return;
}
closed = true;
toRelease = new HashMap<>(changelogStoragesByJobId);
changelogStoragesByJobId.clear();
}
ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG);
LOG.info("Shutting down TaskExecutorStateChangelogStoragesManager.");
for (Map.Entry<JobID, StateChangelogStorage<?>> entry : toRelease.entrySet()) {
doRelease(entry.getValue());
}
}
private void doRelease(StateChangelogStorage<?> storage) {
if (storage != null) {
try {
storage.close();
} catch (Exception e) {
LOG.warn("Exception while disposing state changelog storage {}.", storage, e);
}
}
}
}
|
class TaskExecutorStateChangelogStoragesManager {
/** Logger for this class. */
private static final Logger LOG =
LoggerFactory.getLogger(TaskExecutorStateChangelogStoragesManager.class);
/**
* This map holds all state changelog storages for tasks running on the task manager / executor
* that owns this instance. Maps from job id to all the subtasks' state changelog
* storages. The value type is Optional so that a previously loaded null storage can be represented.
*/
private final Map<JobID, Optional<StateChangelogStorage<?>>> changelogStoragesByJobId;
private boolean closed;
/** shutdown hook for this manager. */
private final Thread shutdownHook;
@Nullable
public StateChangelogStorage<?> stateChangelogStorageForJob(
@Nonnull JobID jobId, Configuration configuration) {
if (closed) {
throw new IllegalStateException(
"TaskExecutorStateChangelogStoragesManager is already closed and cannot "
+ "register a new StateChangelogStorage.");
}
Optional<StateChangelogStorage<?>> stateChangelogStorage =
changelogStoragesByJobId.get(jobId);
if (stateChangelogStorage == null) {
StateChangelogStorage<?> loaded = StateChangelogStorageLoader.load(configuration);
stateChangelogStorage = Optional.ofNullable(loaded);
changelogStoragesByJobId.put(jobId, stateChangelogStorage);
if (loaded != null) {
LOG.debug("Registered new state changelog storage for job {} : {}.", jobId, loaded);
} else {
LOG.info(
"Try to registered new state changelog storage for job {},"
+ " but result is null.",
jobId);
}
} else if (stateChangelogStorage.isPresent()) {
LOG.debug(
"Found existing state changelog storage for job {}: {}.",
jobId,
stateChangelogStorage.get());
} else {
LOG.debug("Found a previously loaded NULL state changelog storage for job {}.", jobId);
}
return stateChangelogStorage.orElse(null);
}
public void releaseStateChangelogStorageForJob(@Nonnull JobID jobId) {
LOG.debug("Releasing state changelog storage under job id {}.", jobId);
if (closed) {
return;
}
Optional<StateChangelogStorage<?>> cleanupChangelogStorage =
changelogStoragesByJobId.remove(jobId);
if (cleanupChangelogStorage != null) {
cleanupChangelogStorage.ifPresent(this::doRelease);
}
}
public void shutdown() {
if (closed) {
return;
}
closed = true;
HashMap<JobID, Optional<StateChangelogStorage<?>>> toRelease =
new HashMap<>(changelogStoragesByJobId);
changelogStoragesByJobId.clear();
ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG);
LOG.info("Shutting down TaskExecutorStateChangelogStoragesManager.");
for (Map.Entry<JobID, Optional<StateChangelogStorage<?>>> entry : toRelease.entrySet()) {
entry.getValue().ifPresent(this::doRelease);
}
}
private void doRelease(StateChangelogStorage<?> storage) {
if (storage != null) {
try {
storage.close();
} catch (Exception e) {
LOG.warn("Exception while disposing state changelog storage {}.", storage, e);
}
}
}
}
|
This is quite confusing. In such cases, I think merging the Limit into such an AggregationOperator is a mistake rather than intentional.
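For reference, a simplified, self-contained restatement of the guard in the check below; plain booleans stand in for the operator classes:

```java
public class LimitMergeGuardSketch {
    static boolean canMergeLimit(boolean childIsAggregation,
                                 int distinctAggCount,
                                 boolean hasMultiColumnDistinct,
                                 boolean limitIsLocal) {
        if (!childIsAggregation) {
            return limitIsLocal;
        }
        // A single multi-column DISTINCT is planned in several phases; merging
        // the limit would truncate an intermediate phase, so it is excluded.
        return (!hasMultiColumnDistinct || distinctAggCount > 1) && limitIsLocal;
    }

    public static void main(String[] args) {
        // A lone multi-column distinct aggregate: merge is rejected.
        System.out.println(canMergeLimit(true, 1, true, true));  // false
        // A single-column distinct: merge is allowed for a local limit.
        System.out.println(canMergeLimit(true, 1, false, true)); // true
    }
}
```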
|
public boolean check(OptExpression input, OptimizerContext context) {
LogicalLimitOperator limit = (LogicalLimitOperator) input.getOp();
Operator op = input.inputAt(0).getOp();
if (op instanceof LogicalAggregationOperator) {
LogicalAggregationOperator aggOp = op.cast();
List<CallOperator> distinctAggOperatorList = aggOp.getAggregations().values().stream()
.filter(CallOperator::isDistinct).collect(Collectors.toList());
boolean hasMultiColumns = distinctAggOperatorList.stream().anyMatch(f -> f.getChildren().size() > 1);
return (!hasMultiColumns || distinctAggOperatorList.size() > 1) && limit.isLocal();
} else {
return limit.isLocal();
}
}
|
}
|
public boolean check(OptExpression input, OptimizerContext context) {
LogicalLimitOperator limit = (LogicalLimitOperator) input.getOp();
Operator op = input.inputAt(0).getOp();
if (op instanceof LogicalAggregationOperator) {
LogicalAggregationOperator aggOp = op.cast();
List<CallOperator> distinctAggOperatorList = aggOp.getAggregations().values().stream()
.filter(CallOperator::isDistinct).collect(Collectors.toList());
boolean hasMultiColumns = distinctAggOperatorList.stream().anyMatch(f -> f.getChildren().size() > 1);
return (!hasMultiColumns || distinctAggOperatorList.size() > 1) && limit.isLocal();
} else {
return limit.isLocal();
}
}
|
class MergeLimitDirectRule extends TransformationRule {
public static final MergeLimitDirectRule AGGREGATE = new MergeLimitDirectRule(OperatorType.LOGICAL_AGGR);
public static final MergeLimitDirectRule OLAP_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_OLAP_SCAN);
public static final MergeLimitDirectRule HIVE_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_HIVE_SCAN);
public static final MergeLimitDirectRule ICEBERG_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_ICEBERG_SCAN);
public static final MergeLimitDirectRule HUDI_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_HUDI_SCAN);
public static final MergeLimitDirectRule DELTALAKE_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_DELTALAKE_SCAN);
public static final MergeLimitDirectRule FILE_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_FILE_SCAN);
public static final MergeLimitDirectRule SCHEMA_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_SCHEMA_SCAN);
public static final MergeLimitDirectRule MYSQL_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_MYSQL_SCAN);
public static final MergeLimitDirectRule ES_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_ES_SCAN);
public static final MergeLimitDirectRule JDBC_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_JDBC_SCAN);
public static final MergeLimitDirectRule WINDOW = new MergeLimitDirectRule(OperatorType.LOGICAL_WINDOW);
public static final MergeLimitDirectRule INTERSECT = new MergeLimitDirectRule(OperatorType.LOGICAL_INTERSECT);
public static final MergeLimitDirectRule EXCEPT = new MergeLimitDirectRule(OperatorType.LOGICAL_EXCEPT);
public static final MergeLimitDirectRule VALUES = new MergeLimitDirectRule(OperatorType.LOGICAL_VALUES);
public static final MergeLimitDirectRule FILTER = new MergeLimitDirectRule(OperatorType.LOGICAL_FILTER);
public static final MergeLimitDirectRule TABLE_FUNCTION =
new MergeLimitDirectRule(OperatorType.LOGICAL_TABLE_FUNCTION);
private MergeLimitDirectRule(OperatorType logicalOperatorType) {
super(RuleType.TF_MERGE_LIMIT_DIRECT, Pattern.create(OperatorType.LOGICAL_LIMIT)
.addChildren(Pattern.create(logicalOperatorType, OperatorType.PATTERN_MULTI_LEAF)));
}
@Override
public List<OptExpression> transform(OptExpression input, OptimizerContext context) {
LogicalLimitOperator limit = (LogicalLimitOperator) input.getOp();
Preconditions.checkState(!limit.hasOffset());
LogicalOperator op = (LogicalOperator) input.getInputs().get(0).getOp();
op.setLimit(limit.getLimit());
return Lists.newArrayList(OptExpression.create(op, input.getInputs().get(0).getInputs()));
}
}
|
class MergeLimitDirectRule extends TransformationRule {
public static final MergeLimitDirectRule AGGREGATE = new MergeLimitDirectRule(OperatorType.LOGICAL_AGGR);
public static final MergeLimitDirectRule OLAP_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_OLAP_SCAN);
public static final MergeLimitDirectRule HIVE_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_HIVE_SCAN);
public static final MergeLimitDirectRule ICEBERG_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_ICEBERG_SCAN);
public static final MergeLimitDirectRule HUDI_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_HUDI_SCAN);
public static final MergeLimitDirectRule DELTALAKE_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_DELTALAKE_SCAN);
public static final MergeLimitDirectRule FILE_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_FILE_SCAN);
public static final MergeLimitDirectRule SCHEMA_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_SCHEMA_SCAN);
public static final MergeLimitDirectRule MYSQL_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_MYSQL_SCAN);
public static final MergeLimitDirectRule ES_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_ES_SCAN);
public static final MergeLimitDirectRule JDBC_SCAN = new MergeLimitDirectRule(OperatorType.LOGICAL_JDBC_SCAN);
public static final MergeLimitDirectRule WINDOW = new MergeLimitDirectRule(OperatorType.LOGICAL_WINDOW);
public static final MergeLimitDirectRule INTERSECT = new MergeLimitDirectRule(OperatorType.LOGICAL_INTERSECT);
public static final MergeLimitDirectRule EXCEPT = new MergeLimitDirectRule(OperatorType.LOGICAL_EXCEPT);
public static final MergeLimitDirectRule VALUES = new MergeLimitDirectRule(OperatorType.LOGICAL_VALUES);
public static final MergeLimitDirectRule FILTER = new MergeLimitDirectRule(OperatorType.LOGICAL_FILTER);
public static final MergeLimitDirectRule TABLE_FUNCTION =
new MergeLimitDirectRule(OperatorType.LOGICAL_TABLE_FUNCTION);
private MergeLimitDirectRule(OperatorType logicalOperatorType) {
super(RuleType.TF_MERGE_LIMIT_DIRECT, Pattern.create(OperatorType.LOGICAL_LIMIT)
.addChildren(Pattern.create(logicalOperatorType, OperatorType.PATTERN_MULTI_LEAF)));
}
@Override
public List<OptExpression> transform(OptExpression input, OptimizerContext context) {
LogicalLimitOperator limit = (LogicalLimitOperator) input.getOp();
Preconditions.checkState(!limit.hasOffset());
LogicalOperator op = (LogicalOperator) input.getInputs().get(0).getOp();
op.setLimit(limit.getLimit());
return Lists.newArrayList(OptExpression.create(op, input.getInputs().get(0).getInputs()));
}
}
|
nit: Would LOG.error make this easier to discover?
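A sketch of the suggestion (the helper is hypothetical, not the actual patch); as an aside, passing the caught exception as the last argument lets SLF4J attach the stack trace, which the current String.format call drops:

```java
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Error level for visibility, a '{}' placeholder instead of String.format,
// and the cause passed through so the stack trace shows up in the log.
class LeakLoggingSketch {
    private static final Logger LOG = LoggerFactory.getLogger(LeakLoggingSketch.class);

    static void logLeakedSubscription(String subscriptionPath, IOException cause) {
        LOG.error("Leaked PubSub subscription '{}'", subscriptionPath, cause);
    }
}
```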
|
public void waitForSuccess(Duration duration) throws IOException {
SubscriptionPath resultSubscriptionPath =
PubsubClient.subscriptionPathFromName(
pipelineOptions.getProject(),
"result-subscription-" + String.valueOf(ThreadLocalRandom.current().nextLong()));
pubsub.createSubscription(
resultTopicPath, resultSubscriptionPath, (int) duration.getStandardSeconds());
String result = pollForResultForDuration(resultSubscriptionPath, duration);
try {
pubsub.deleteSubscription(resultSubscriptionPath);
} catch (IOException e) {
LOG.warn(String.format("Leaked PubSub subscription '%s'", resultSubscriptionPath));
}
if (!RESULT_SUCCESS_MESSAGE.equals(result)) {
throw new AssertionError(result);
}
}
|
LOG.warn(String.format("Leaked PubSub subscription '%s'", resultSubscriptionPath));
|
public void waitForSuccess(Duration duration) throws IOException {
SubscriptionPath resultSubscriptionPath =
PubsubClient.subscriptionPathFromName(
pipelineOptions.getProject(),
"result-subscription-" + String.valueOf(ThreadLocalRandom.current().nextLong()));
pubsub.createSubscription(
resultTopicPath, resultSubscriptionPath, (int) duration.getStandardSeconds());
String result = pollForResultForDuration(resultSubscriptionPath, duration);
try {
pubsub.deleteSubscription(resultSubscriptionPath);
} catch (IOException e) {
LOG.error(String.format("Leaked PubSub subscription '%s'", resultSubscriptionPath));
}
if (!RESULT_SUCCESS_MESSAGE.equals(result)) {
throw new AssertionError(result);
}
}
|
class TestPubsubSignal implements TestRule {
private static final Logger LOG = LoggerFactory.getLogger(TestPubsubSignal.class);
private static final String RESULT_TOPIC_NAME = "result";
private static final String RESULT_SUCCESS_MESSAGE = "SUCCESS";
private static final String START_TOPIC_NAME = "start";
private static final String START_SIGNAL_MESSAGE = "START SIGNAL";
private static final String NO_ID_ATTRIBUTE = null;
private static final String NO_TIMESTAMP_ATTRIBUTE = null;
PubsubClient pubsub;
private TestPubsubOptions pipelineOptions;
private @Nullable TopicPath resultTopicPath = null;
private @Nullable TopicPath startTopicPath = null;
/**
* Creates an instance of this rule.
*
* <p>Loads GCP configuration from {@link TestPipelineOptions}.
*/
public static TestPubsubSignal create() {
TestPubsubOptions options = TestPipeline.testingPipelineOptions().as(TestPubsubOptions.class);
return new TestPubsubSignal(options);
}
private TestPubsubSignal(TestPubsubOptions pipelineOptions) {
this.pipelineOptions = pipelineOptions;
}
@Override
public Statement apply(Statement base, Description description) {
return new Statement() {
@Override
public void evaluate() throws Throwable {
if (TestPubsubSignal.this.pubsub != null) {
throw new AssertionError(
"Pubsub client was not shutdown in previous test. "
+ "Topic path is'"
+ resultTopicPath
+ "'. "
+ "Current test: "
+ description.getDisplayName());
}
try {
initializePubsub(description);
base.evaluate();
} finally {
tearDown();
}
}
};
}
private void initializePubsub(Description description) throws IOException {
pubsub =
PubsubGrpcClient.FACTORY.newClient(
NO_TIMESTAMP_ATTRIBUTE, NO_ID_ATTRIBUTE, pipelineOptions);
TopicPath resultTopicPathTmp =
PubsubClient.topicPathFromName(
pipelineOptions.getProject(), createTopicName(description, RESULT_TOPIC_NAME));
TopicPath startTopicPathTmp =
PubsubClient.topicPathFromName(
pipelineOptions.getProject(), createTopicName(description, START_TOPIC_NAME));
pubsub.createTopic(resultTopicPathTmp);
pubsub.createTopic(startTopicPathTmp);
resultTopicPath = resultTopicPathTmp;
startTopicPath = startTopicPathTmp;
}
private void tearDown() throws IOException {
if (pubsub == null) {
return;
}
try {
if (resultTopicPath != null) {
pubsub.deleteTopic(resultTopicPath);
}
if (startTopicPath != null) {
pubsub.deleteTopic(startTopicPath);
}
} finally {
pubsub.close();
pubsub = null;
resultTopicPath = null;
}
}
/** Outputs a message that the pipeline has started. */
public PTransform<PBegin, PDone> signalStart() {
return new PublishStart(startTopicPath);
}
/**
* Outputs a success message when {@code successPredicate} is evaluated to true.
*
* <p>{@code successPredicate} is a {@link SerializableFunction} that accepts a set of currently
* captured events and returns true when the set satisfies the success criteria.
*
* <p>If {@code successPredicate} is evaluated to false, then it will be re-evaluated when next
* event becomes available.
*
* <p>If {@code successPredicate} is evaluated to true, then a success will be signaled and {@link
*
*
* <p>If {@code successPredicate} throws, then failure will be signaled and {@link
*
*/
public <T> PTransform<PCollection<? extends T>, POutput> signalSuccessWhen(
Coder<T> coder,
SerializableFunction<T, String> formatter,
SerializableFunction<Set<T>, Boolean> successPredicate) {
return new PublishSuccessWhen<>(coder, formatter, successPredicate, resultTopicPath);
}
/**
* Invocation of {@link
* with {@link Object
*/
public <T> PTransform<PCollection<? extends T>, POutput> signalSuccessWhen(
Coder<T> coder, SerializableFunction<Set<T>, Boolean> successPredicate) {
return signalSuccessWhen(coder, T::toString, successPredicate);
}
/**
* Future that waits for a start signal for {@code duration}.
*
* <p>This future must be created before running the pipeline. A subscription must exist prior to
* the start signal being published, which occurs immediately upon pipeline startup.
*/
public Supplier<Void> waitForStart(Duration duration) throws IOException {
SubscriptionPath startSubscriptionPath =
PubsubClient.subscriptionPathFromName(
pipelineOptions.getProject(),
"start-subscription-" + String.valueOf(ThreadLocalRandom.current().nextLong()));
pubsub.createSubscription(
startTopicPath, startSubscriptionPath, (int) duration.getStandardSeconds());
return Suppliers.memoize(
() -> {
try {
String result = pollForResultForDuration(startSubscriptionPath, duration);
checkState(START_SIGNAL_MESSAGE.equals(result));
return null;
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
try {
pubsub.deleteSubscription(startSubscriptionPath);
} catch (IOException e) {
LOG.warn(String.format("Leaked PubSub subscription '%s'", startSubscriptionPath));
}
}
});
}
/** Wait for a success signal for {@code duration}. */
private String pollForResultForDuration(
SubscriptionPath signalSubscriptionPath, Duration duration) throws IOException {
List<PubsubClient.IncomingMessage> signal = null;
DateTime endPolling = DateTime.now().plus(duration.getMillis());
do {
try {
signal = pubsub.pull(DateTime.now().getMillis(), signalSubscriptionPath, 1, false);
if (signal.isEmpty()) {
continue;
}
pubsub.acknowledge(
signalSubscriptionPath, signal.stream().map(IncomingMessage::ackId).collect(toList()));
break;
} catch (StatusRuntimeException e) {
if (!Status.DEADLINE_EXCEEDED.equals(e.getStatus())) {
LOG.warn(
"(Will retry) Error while polling {} for signal: {}",
signalSubscriptionPath,
e.getStatus());
}
sleep(500);
}
} while (DateTime.now().isBefore(endPolling));
if (signal == null || signal.isEmpty()) {
throw new AssertionError(
String.format(
"Did not receive signal on %s in %ss",
signalSubscriptionPath, duration.getStandardSeconds()));
}
return signal.get(0).message().getData().toStringUtf8();
}
private void sleep(long t) {
try {
Thread.sleep(t);
} catch (InterruptedException ex) {
throw new RuntimeException(ex);
}
}
/** {@link PTransform} that signals once when the pipeline has started. */
static class PublishStart extends PTransform<PBegin, PDone> {
private final TopicPath startTopicPath;
PublishStart(TopicPath startTopicPath) {
this.startTopicPath = startTopicPath;
}
@Override
public PDone expand(PBegin input) {
return input
.apply("Start signal", Create.of(START_SIGNAL_MESSAGE))
.apply(PubsubIO.writeStrings().to(startTopicPath.getPath()));
}
}
  /** {@link PTransform} that validates whether elements seen so far match success criteria. */
static class PublishSuccessWhen<T> extends PTransform<PCollection<? extends T>, POutput> {
private final Coder<T> coder;
private final SerializableFunction<T, String> formatter;
private final SerializableFunction<Set<T>, Boolean> successPredicate;
private final TopicPath resultTopicPath;
PublishSuccessWhen(
Coder<T> coder,
SerializableFunction<T, String> formatter,
SerializableFunction<Set<T>, Boolean> successPredicate,
TopicPath resultTopicPath) {
this.coder = coder;
this.formatter = formatter;
this.successPredicate = successPredicate;
this.resultTopicPath = resultTopicPath;
}
@Override
public POutput expand(PCollection<? extends T> input) {
return input
.apply(Window.into(new GlobalWindows()))
.apply(WithKeys.of("dummyKey"))
.apply(
"checkAllEventsForSuccess",
ParDo.of(new StatefulPredicateCheck<>(coder, formatter, successPredicate)))
.apply("publishSuccess", PubsubIO.writeStrings().to(resultTopicPath.getPath()));
}
}
/**
* Stateful {@link DoFn} which caches the elements it sees and checks whether they satisfy the
* predicate.
*
* <p>When predicate is satisfied outputs "SUCCESS". If predicate throws exception, outputs
* "FAILURE".
*/
static class StatefulPredicateCheck<T> extends DoFn<KV<String, ? extends T>, String> {
private final SerializableFunction<T, String> formatter;
private SerializableFunction<Set<T>, Boolean> successPredicate;
private static final String SEEN_EVENTS = "seenEvents";
@StateId(SEEN_EVENTS)
private final StateSpec<BagState<T>> seenEvents;
StatefulPredicateCheck(
Coder<T> coder,
SerializableFunction<T, String> formatter,
SerializableFunction<Set<T>, Boolean> successPredicate) {
this.seenEvents = StateSpecs.bag(coder);
this.formatter = formatter;
this.successPredicate = successPredicate;
}
@ProcessElement
public void processElement(
ProcessContext context, @StateId(SEEN_EVENTS) BagState<T> seenEvents) {
seenEvents.add(context.element().getValue());
ImmutableSet<T> eventsSoFar = ImmutableSet.copyOf(seenEvents.read());
try {
if (successPredicate.apply(eventsSoFar)) {
context.output("SUCCESS");
}
} catch (Throwable e) {
context.output("FAILURE: " + e.getMessage());
}
}
}
}
|
class TestPubsubSignal implements TestRule {
private static final Logger LOG = LoggerFactory.getLogger(TestPubsubSignal.class);
private static final String RESULT_TOPIC_NAME = "result";
private static final String RESULT_SUCCESS_MESSAGE = "SUCCESS";
private static final String START_TOPIC_NAME = "start";
private static final String START_SIGNAL_MESSAGE = "START SIGNAL";
private static final String NO_ID_ATTRIBUTE = null;
private static final String NO_TIMESTAMP_ATTRIBUTE = null;
PubsubClient pubsub;
private TestPubsubOptions pipelineOptions;
private @Nullable TopicPath resultTopicPath = null;
private @Nullable TopicPath startTopicPath = null;
/**
* Creates an instance of this rule.
*
* <p>Loads GCP configuration from {@link TestPipelineOptions}.
*/
public static TestPubsubSignal create() {
TestPubsubOptions options = TestPipeline.testingPipelineOptions().as(TestPubsubOptions.class);
return new TestPubsubSignal(options);
}
private TestPubsubSignal(TestPubsubOptions pipelineOptions) {
this.pipelineOptions = pipelineOptions;
}
@Override
public Statement apply(Statement base, Description description) {
return new Statement() {
@Override
public void evaluate() throws Throwable {
if (TestPubsubSignal.this.pubsub != null) {
throw new AssertionError(
"Pubsub client was not shutdown in previous test. "
+ "Topic path is'"
+ resultTopicPath
+ "'. "
+ "Current test: "
+ description.getDisplayName());
}
try {
initializePubsub(description);
base.evaluate();
} finally {
tearDown();
}
}
};
}
private void initializePubsub(Description description) throws IOException {
pubsub =
PubsubGrpcClient.FACTORY.newClient(
NO_TIMESTAMP_ATTRIBUTE, NO_ID_ATTRIBUTE, pipelineOptions);
TopicPath resultTopicPathTmp =
PubsubClient.topicPathFromName(
pipelineOptions.getProject(), createTopicName(description, RESULT_TOPIC_NAME));
TopicPath startTopicPathTmp =
PubsubClient.topicPathFromName(
pipelineOptions.getProject(), createTopicName(description, START_TOPIC_NAME));
pubsub.createTopic(resultTopicPathTmp);
pubsub.createTopic(startTopicPathTmp);
resultTopicPath = resultTopicPathTmp;
startTopicPath = startTopicPathTmp;
}
private void tearDown() throws IOException {
if (pubsub == null) {
return;
}
try {
if (resultTopicPath != null) {
pubsub.deleteTopic(resultTopicPath);
}
if (startTopicPath != null) {
pubsub.deleteTopic(startTopicPath);
}
} finally {
pubsub.close();
pubsub = null;
resultTopicPath = null;
}
}
/** Outputs a message that the pipeline has started. */
public PTransform<PBegin, PDone> signalStart() {
return new PublishStart(startTopicPath);
}
/**
* Outputs a success message when {@code successPredicate} is evaluated to true.
*
* <p>{@code successPredicate} is a {@link SerializableFunction} that accepts a set of currently
* captured events and returns true when the set satisfies the success criteria.
*
   * <p>If {@code successPredicate} is evaluated to false, then it will be re-evaluated when the next
* event becomes available.
*
* <p>If {@code successPredicate} is evaluated to true, then a success will be signaled and {@link
*
*
* <p>If {@code successPredicate} throws, then failure will be signaled and {@link
*
*/
public <T> PTransform<PCollection<? extends T>, POutput> signalSuccessWhen(
Coder<T> coder,
SerializableFunction<T, String> formatter,
SerializableFunction<Set<T>, Boolean> successPredicate) {
return new PublishSuccessWhen<>(coder, formatter, successPredicate, resultTopicPath);
}
/**
* Invocation of {@link
* with {@link Object
*/
public <T> PTransform<PCollection<? extends T>, POutput> signalSuccessWhen(
Coder<T> coder, SerializableFunction<Set<T>, Boolean> successPredicate) {
return signalSuccessWhen(coder, T::toString, successPredicate);
}
/**
* Future that waits for a start signal for {@code duration}.
*
* <p>This future must be created before running the pipeline. A subscription must exist prior to
* the start signal being published, which occurs immediately upon pipeline startup.
*/
public Supplier<Void> waitForStart(Duration duration) throws IOException {
SubscriptionPath startSubscriptionPath =
PubsubClient.subscriptionPathFromName(
pipelineOptions.getProject(),
"start-subscription-" + String.valueOf(ThreadLocalRandom.current().nextLong()));
pubsub.createSubscription(
startTopicPath, startSubscriptionPath, (int) duration.getStandardSeconds());
return Suppliers.memoize(
() -> {
try {
String result = pollForResultForDuration(startSubscriptionPath, duration);
checkState(START_SIGNAL_MESSAGE.equals(result));
return null;
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
try {
pubsub.deleteSubscription(startSubscriptionPath);
} catch (IOException e) {
LOG.error(String.format("Leaked PubSub subscription '%s'", startSubscriptionPath));
}
}
});
}
/** Wait for a success signal for {@code duration}. */
private String pollForResultForDuration(
SubscriptionPath signalSubscriptionPath, Duration duration) throws IOException {
List<PubsubClient.IncomingMessage> signal = null;
DateTime endPolling = DateTime.now().plus(duration.getMillis());
do {
try {
signal = pubsub.pull(DateTime.now().getMillis(), signalSubscriptionPath, 1, false);
if (signal.isEmpty()) {
continue;
}
pubsub.acknowledge(
signalSubscriptionPath, signal.stream().map(IncomingMessage::ackId).collect(toList()));
break;
} catch (StatusRuntimeException e) {
if (!Status.DEADLINE_EXCEEDED.equals(e.getStatus())) {
LOG.warn(
"(Will retry) Error while polling {} for signal: {}",
signalSubscriptionPath,
e.getStatus());
}
sleep(500);
}
} while (DateTime.now().isBefore(endPolling));
if (signal == null || signal.isEmpty()) {
throw new AssertionError(
String.format(
"Did not receive signal on %s in %ss",
signalSubscriptionPath, duration.getStandardSeconds()));
}
return signal.get(0).message().getData().toStringUtf8();
}
private void sleep(long t) {
try {
Thread.sleep(t);
} catch (InterruptedException ex) {
throw new RuntimeException(ex);
}
}
/** {@link PTransform} that signals once when the pipeline has started. */
static class PublishStart extends PTransform<PBegin, PDone> {
private final TopicPath startTopicPath;
PublishStart(TopicPath startTopicPath) {
this.startTopicPath = startTopicPath;
}
@Override
public PDone expand(PBegin input) {
return input
.apply("Start signal", Create.of(START_SIGNAL_MESSAGE))
.apply(PubsubIO.writeStrings().to(startTopicPath.getPath()));
}
}
  /** {@link PTransform} that validates whether elements seen so far match success criteria. */
static class PublishSuccessWhen<T> extends PTransform<PCollection<? extends T>, POutput> {
private final Coder<T> coder;
private final SerializableFunction<T, String> formatter;
private final SerializableFunction<Set<T>, Boolean> successPredicate;
private final TopicPath resultTopicPath;
PublishSuccessWhen(
Coder<T> coder,
SerializableFunction<T, String> formatter,
SerializableFunction<Set<T>, Boolean> successPredicate,
TopicPath resultTopicPath) {
this.coder = coder;
this.formatter = formatter;
this.successPredicate = successPredicate;
this.resultTopicPath = resultTopicPath;
}
@Override
public POutput expand(PCollection<? extends T> input) {
return input
.apply(Window.into(new GlobalWindows()))
.apply(WithKeys.of("dummyKey"))
.apply(
"checkAllEventsForSuccess",
ParDo.of(new StatefulPredicateCheck<>(coder, formatter, successPredicate)))
.apply("publishSuccess", PubsubIO.writeStrings().to(resultTopicPath.getPath()));
}
}
/**
* Stateful {@link DoFn} which caches the elements it sees and checks whether they satisfy the
* predicate.
*
* <p>When predicate is satisfied outputs "SUCCESS". If predicate throws exception, outputs
* "FAILURE".
*/
static class StatefulPredicateCheck<T> extends DoFn<KV<String, ? extends T>, String> {
private final SerializableFunction<T, String> formatter;
private SerializableFunction<Set<T>, Boolean> successPredicate;
private static final String SEEN_EVENTS = "seenEvents";
@StateId(SEEN_EVENTS)
private final StateSpec<BagState<T>> seenEvents;
StatefulPredicateCheck(
Coder<T> coder,
SerializableFunction<T, String> formatter,
SerializableFunction<Set<T>, Boolean> successPredicate) {
this.seenEvents = StateSpecs.bag(coder);
this.formatter = formatter;
this.successPredicate = successPredicate;
}
@ProcessElement
public void processElement(
ProcessContext context, @StateId(SEEN_EVENTS) BagState<T> seenEvents) {
seenEvents.add(context.element().getValue());
ImmutableSet<T> eventsSoFar = ImmutableSet.copyOf(seenEvents.read());
try {
if (successPredicate.apply(eventsSoFar)) {
context.output("SUCCESS");
}
} catch (Throwable e) {
context.output("FAILURE: " + e.getMessage());
}
}
}
}
|
What's the reason for this?
|
public String getServiceProperty(TaskContext context, String unit, String property) {
return newCommandLine(context)
.add("systemctl", "show", "--property", property, "--value", unit + ".service")
.ignoreExitCode()
.execute()
.getOutput();
}
|
.ignoreExitCode()
|
public String getServiceProperty(TaskContext context, String unit, String property) {
return newCommandLine(context)
.add("systemctl", "show", "--property", property, "--value", unit + ".service")
.execute()
.getOutput();
}
|
class SystemCtl {
private static final Pattern PROPERTY_NAME_PATTERN = Pattern.compile("^[a-zA-Z]+$");
private static final Pattern UNIT_FILES_LISTED_PATTERN = Pattern.compile("([0-9]+) unit files listed\\.");
private static final Pattern ACTIVE_STATE_PROPERTY_PATTERN = createPropertyPattern("ActiveState");
private final Terminal terminal;
private boolean useSudo = false;
private static Pattern createPropertyPattern(String propertyName) {
if (!PROPERTY_NAME_PATTERN.matcher(propertyName).matches()) {
throw new IllegalArgumentException("Property name does not match " + PROPERTY_NAME_PATTERN);
}
String regex = String.format("(?md)^%s=(.*)$", propertyName);
return Pattern.compile(regex);
}
public SystemCtl(Terminal terminal) {
this.terminal = terminal;
}
/** Call all commands through sudo */
public SystemCtl withSudo() {
this.useSudo = true;
return this;
}
/** Returns whether this is configured to use sudo */
public boolean useSudo() {
return useSudo;
}
public void daemonReload(TaskContext taskContext) {
newCommandLine(taskContext).add("systemctl", "daemon-reload")
.execute();
}
public SystemCtlEnable enable(String unit) { return new SystemCtlEnable(unit); }
public SystemCtlDisable disable(String unit) { return new SystemCtlDisable(unit); }
public SystemCtlStart start(String unit) { return new SystemCtlStart(unit); }
public SystemCtlStop stop(String unit) { return new SystemCtlStop(unit); }
public SystemCtlRestart restart(String unit) { return new SystemCtlRestart(unit); }
public boolean serviceExists(TaskContext context, String unit) {
return newCommandLine(context)
.add("systemctl", "list-unit-files", unit + ".service").executeSilently()
.mapOutput(output -> {
Matcher matcher = UNIT_FILES_LISTED_PATTERN.matcher(output);
if (!matcher.find()) {
throw new IllegalArgumentException();
}
return !matcher.group(1).equals("0");
});
}
/** Returns true if the unit exists and is active (i.e. running). unit is e.g. "docker". */
public boolean isActive(TaskContext context, String unit) {
return newCommandLine(context)
.add("systemctl", "--quiet", "is-active", unit + ".service")
.ignoreExitCode()
.executeSilently()
.map(CommandResult::getExitCode) == 0;
}
private CommandLine newCommandLine(TaskContext context) {
var commandLine = terminal.newCommandLine(context);
if (useSudo) {
commandLine.add("sudo");
}
return commandLine;
}
public class SystemCtlEnable extends SystemCtlCommand {
private SystemCtlEnable(String unit) {
super("enable", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
return isUnitEnabled(context);
}
}
public class SystemCtlDisable extends SystemCtlCommand {
private SystemCtlDisable(String unit) {
super("disable", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
return !isUnitEnabled(context);
}
}
public class SystemCtlStart extends SystemCtlCommand {
private SystemCtlStart(String unit) {
super("start", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
String activeState = getSystemCtlProperty(context, ACTIVE_STATE_PROPERTY_PATTERN);
return Objects.equals(activeState, "active");
}
}
public class SystemCtlStop extends SystemCtlCommand {
private SystemCtlStop(String unit) {
super("stop", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
String activeState = getSystemCtlProperty(context, ACTIVE_STATE_PROPERTY_PATTERN);
return Objects.equals(activeState, "inactive");
}
}
public class SystemCtlRestart extends SystemCtlCommand {
private SystemCtlRestart(String unit) {
super("restart", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
return false;
}
}
public abstract class SystemCtlCommand {
private final String command;
private final String unit;
private SystemCtlCommand(String command, String unit) {
this.command = command;
this.unit = unit;
}
protected abstract boolean isAlreadyConverged(TaskContext context);
public boolean converge(TaskContext context) {
if (isAlreadyConverged(context)) {
return false;
}
newCommandLine(context).add("systemctl", command, unit)
.execute();
return true;
}
/** Returns true if unit is enabled */
boolean isUnitEnabled(TaskContext context) {
return newCommandLine(context).add("systemctl", "--quiet", "is-enabled", unit)
.ignoreExitCode()
.executeSilently()
.map(CommandResult::getExitCode) == 0;
}
/**
* @param propertyPattern Pattern to match the output of systemctl show command with
     *                        exactly 1 group. The matching group must exist.
* @return The matched group from the 'systemctl show' output.
*/
String getSystemCtlProperty(TaskContext context, Pattern propertyPattern) {
return newCommandLine(context).add("systemctl", "show", unit)
.executeSilently()
.mapOutput(output -> extractProperty(output, propertyPattern));
}
}
/**
* Find the systemd property value of the property (given by propertyPattern)
     * matching the 'systemctl show' output (given by showOutput).
*/
private static String extractProperty(String showOutput, Pattern propertyPattern) {
Matcher matcher = propertyPattern.matcher(showOutput);
if (!matcher.find()) {
throw new IllegalArgumentException("Pattern '" + propertyPattern +
"' didn't match output");
} else if (matcher.groupCount() != 1) {
throw new IllegalArgumentException("Property pattern must have exactly 1 group");
}
return matcher.group(1);
}
}
|
class SystemCtl {
private static final Pattern PROPERTY_NAME_PATTERN = Pattern.compile("^[a-zA-Z]+$");
private static final Pattern UNIT_FILES_LISTED_PATTERN = Pattern.compile("([0-9]+) unit files listed\\.");
private static final Pattern ACTIVE_STATE_PROPERTY_PATTERN = createPropertyPattern("ActiveState");
private final Terminal terminal;
private boolean useSudo = false;
private static Pattern createPropertyPattern(String propertyName) {
if (!PROPERTY_NAME_PATTERN.matcher(propertyName).matches()) {
throw new IllegalArgumentException("Property name does not match " + PROPERTY_NAME_PATTERN);
}
String regex = String.format("(?md)^%s=(.*)$", propertyName);
return Pattern.compile(regex);
}
public SystemCtl(Terminal terminal) {
this.terminal = terminal;
}
/** Call all commands through sudo */
public SystemCtl withSudo() {
this.useSudo = true;
return this;
}
/** Returns whether this is configured to use sudo */
public boolean useSudo() {
return useSudo;
}
public void daemonReload(TaskContext taskContext) {
newCommandLine(taskContext).add("systemctl", "daemon-reload")
.execute();
}
public SystemCtlEnable enable(String unit) { return new SystemCtlEnable(unit); }
public SystemCtlDisable disable(String unit) { return new SystemCtlDisable(unit); }
public SystemCtlStart start(String unit) { return new SystemCtlStart(unit); }
public SystemCtlStop stop(String unit) { return new SystemCtlStop(unit); }
public SystemCtlRestart restart(String unit) { return new SystemCtlRestart(unit); }
public boolean serviceExists(TaskContext context, String unit) {
return newCommandLine(context)
.add("systemctl", "list-unit-files", unit + ".service").executeSilently()
.mapOutput(output -> {
Matcher matcher = UNIT_FILES_LISTED_PATTERN.matcher(output);
if (!matcher.find()) {
throw new IllegalArgumentException();
}
return !matcher.group(1).equals("0");
});
}
/** Returns true if the unit exists and is active (i.e. running). unit is e.g. "docker". */
public boolean isActive(TaskContext context, String unit) {
return newCommandLine(context)
.add("systemctl", "--quiet", "is-active", unit + ".service")
.ignoreExitCode()
.executeSilently()
.map(CommandResult::getExitCode) == 0;
}
private CommandLine newCommandLine(TaskContext context) {
var commandLine = terminal.newCommandLine(context);
if (useSudo) {
commandLine.add("sudo");
}
return commandLine;
}
public class SystemCtlEnable extends SystemCtlCommand {
private SystemCtlEnable(String unit) {
super("enable", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
return isUnitEnabled(context);
}
}
public class SystemCtlDisable extends SystemCtlCommand {
private SystemCtlDisable(String unit) {
super("disable", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
return !isUnitEnabled(context);
}
}
public class SystemCtlStart extends SystemCtlCommand {
private SystemCtlStart(String unit) {
super("start", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
String activeState = getSystemCtlProperty(context, ACTIVE_STATE_PROPERTY_PATTERN);
return Objects.equals(activeState, "active");
}
}
public class SystemCtlStop extends SystemCtlCommand {
private SystemCtlStop(String unit) {
super("stop", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
String activeState = getSystemCtlProperty(context, ACTIVE_STATE_PROPERTY_PATTERN);
return Objects.equals(activeState, "inactive");
}
}
public class SystemCtlRestart extends SystemCtlCommand {
private SystemCtlRestart(String unit) {
super("restart", unit);
}
protected boolean isAlreadyConverged(TaskContext context) {
return false;
}
}
public abstract class SystemCtlCommand {
private final String command;
private final String unit;
private SystemCtlCommand(String command, String unit) {
this.command = command;
this.unit = unit;
}
protected abstract boolean isAlreadyConverged(TaskContext context);
public boolean converge(TaskContext context) {
if (isAlreadyConverged(context)) {
return false;
}
newCommandLine(context).add("systemctl", command, unit)
.execute();
return true;
}
/** Returns true if unit is enabled */
boolean isUnitEnabled(TaskContext context) {
return newCommandLine(context).add("systemctl", "--quiet", "is-enabled", unit)
.ignoreExitCode()
.executeSilently()
.map(CommandResult::getExitCode) == 0;
}
/**
* @param propertyPattern Pattern to match the output of systemctl show command with
     *                        exactly 1 group. The matching group must exist.
* @return The matched group from the 'systemctl show' output.
*/
String getSystemCtlProperty(TaskContext context, Pattern propertyPattern) {
return newCommandLine(context).add("systemctl", "show", unit)
.executeSilently()
.mapOutput(output -> extractProperty(output, propertyPattern));
}
}
/**
* Find the systemd property value of the property (given by propertyPattern)
     * matching the 'systemctl show' output (given by showOutput).
*/
private static String extractProperty(String showOutput, Pattern propertyPattern) {
Matcher matcher = propertyPattern.matcher(showOutput);
if (!matcher.find()) {
throw new IllegalArgumentException("Pattern '" + propertyPattern +
"' didn't match output");
} else if (matcher.groupCount() != 1) {
throw new IllegalArgumentException("Property pattern must have exactly 1 group");
}
return matcher.group(1);
}
}
|
> I would prefer the cancellation mechanism to stay clean and `canceled` being set to `true` only in one place, without using exceptions for that. Understood, and ideally I would do that, but we already have the situation that the gates are closed before `#cancel()` has been called (or else the exception would have been swallowed by your commit). Then the whole cleanup of `Task#invoke` may happen without `canceled` being set at all, and I wouldn't be surprised to see the next issue arising. To further emphasize: if you check `StreamTask#invoke`, you'd see that if `canceled` is set, we skip `afterInvoke` and also take the exceptional code path. If we just swallow the exception and do not set `canceled`, we'd go down a whole other code path.
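To make the two paths concrete, here is a condensed sketch of the `invoke()` flow quoted below (an illustration only, not the full implementation):
    runMailboxLoop();
    if (canceled) {
        throw new CancelTaskException(); // exceptional path: afterInvoke() is skipped
    }
    afterInvoke(); // normal termination, only reached when `canceled` was never set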
|
private void runMailboxLoop() throws Exception {
try {
try {
mailboxProcessor.runMailboxLoop();
}
catch (WrappingRuntimeException wrappingException) {
Throwable unwrapped = wrappingException.unwrap();
if (unwrapped instanceof Exception) {
throw (Exception) unwrapped;
}
else {
throw wrappingException;
}
}
}
catch (InterruptedException e) {
if (!canceled) {
Thread.currentThread().interrupt();
throw e;
}
}
catch (CancelTaskException e) {
canceled = true;
}
catch (Exception e) {
if (canceled) {
LOG.warn("Error while canceling task.", e);
}
else {
throw e;
}
}
}
|
canceled = true;
|
private void runMailboxLoop() throws Exception {
try {
try {
mailboxProcessor.runMailboxLoop();
}
catch (WrappingRuntimeException wrappingException) {
Throwable unwrapped = wrappingException.unwrap();
if (unwrapped instanceof Exception) {
throw (Exception) unwrapped;
}
else {
throw wrappingException;
}
}
}
catch (InterruptedException e) {
if (!canceled) {
Thread.currentThread().interrupt();
throw e;
}
}
catch (CancelTaskException e) {
if (!canceled) {
LOG.error("Received CancelTaskException while we are not canceled. This is a bug and should be reported", e);
}
}
catch (Exception e) {
if (canceled) {
LOG.warn("Error while canceling task.", e);
}
else {
throw e;
}
}
}
|
class StreamTask<OUT, OP extends StreamOperator<OUT>>
extends AbstractInvokable
implements AsyncExceptionHandler {
/** The thread group that holds all trigger timer threads. */
public static final ThreadGroup TRIGGER_THREAD_GROUP = new ThreadGroup("Triggers");
/** The logger used by the StreamTask and its subclasses. */
protected static final Logger LOG = LoggerFactory.getLogger(StreamTask.class);
/**
* All actions outside of the task {@link
* to ensure that we don't have concurrent method calls that void consistent checkpoints.
* <p>CheckpointLock is superseded by {@link MailboxExecutor}, with
* {@link StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor SynchronizedStreamTaskActionExecutor}
* to provide lock to {@link SourceStreamTask} (will be pushed down later). </p>
* {@link StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor SynchronizedStreamTaskActionExecutor}
* will be replaced <b>here</b> with {@link StreamTaskActionExecutor} once {@link
*/
private final StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor actionExecutor;
/**
* The input processor. Initialized in {@link
*/
@Nullable
protected StreamInputProcessor inputProcessor;
/** the head operator that consumes the input streams of this task. */
protected OP headOperator;
/** The chain of operators executed by this task. */
protected OperatorChain<OUT, OP> operatorChain;
/** The configuration of this streaming task. */
protected final StreamConfig configuration;
/** Our state backend. We use this to create checkpoint streams and a keyed state backend. */
protected StateBackend stateBackend;
/** The external storage where checkpoint data is persisted. */
private CheckpointStorageWorkerView checkpointStorage;
/**
* The internal {@link TimerService} used to define the current
* processing time (default = {@code System.currentTimeMillis()}) and
* register timers for tasks to be executed in the future.
*/
protected TimerService timerService;
private final Thread.UncaughtExceptionHandler uncaughtExceptionHandler;
/** The map of user-defined accumulators of this task. */
private final Map<String, Accumulator<?, ?>> accumulatorMap;
/** The currently active background materialization threads. */
private final CloseableRegistry cancelables = new CloseableRegistry();
private final StreamTaskAsyncExceptionHandler asyncExceptionHandler;
/**
* Flag to mark the task "in operation", in which case check needs to be initialized to true,
* so that early cancel() before invoke() behaves correctly.
*/
private volatile boolean isRunning;
/** Flag to mark this task as canceled. */
private volatile boolean canceled;
private boolean disposedOperators;
/** Thread pool for async snapshot workers. */
private ExecutorService asyncOperationsThreadPool;
private final RecordWriterDelegate<SerializationDelegate<StreamRecord<OUT>>> recordWriter;
protected final MailboxProcessor mailboxProcessor;
private Long syncSavepointId = null;
/**
* Constructor for initialization, possibly with initial state (recovery / savepoint / etc).
*
* @param env The task environment for this task.
*/
protected StreamTask(Environment env) {
this(env, null);
}
/**
* Constructor for initialization, possibly with initial state (recovery / savepoint / etc).
*
* @param env The task environment for this task.
* @param timerService Optionally, a specific timer service to use.
*/
protected StreamTask(Environment env, @Nullable TimerService timerService) {
this(env, timerService, FatalExitExceptionHandler.INSTANCE);
}
protected StreamTask(
Environment environment,
@Nullable TimerService timerService,
Thread.UncaughtExceptionHandler uncaughtExceptionHandler) {
this(environment, timerService, uncaughtExceptionHandler, StreamTaskActionExecutor.synchronizedExecutor());
}
/**
* Constructor for initialization, possibly with initial state (recovery / savepoint / etc).
*
* <p>This constructor accepts a special {@link TimerService}. By default (and if
	 * null is passed for the timer service) a {@link SystemProcessingTimeService DefaultTimerService}
* will be used.
*
* @param environment The task environment for this task.
* @param timerService Optionally, a specific timer service to use.
* @param uncaughtExceptionHandler to handle uncaught exceptions in the async operations thread pool
	 * @param actionExecutor a means to wrap all actions performed by this task thread. Currently, only SynchronizedActionExecutor can be used to preserve locking semantics.
*/
protected StreamTask(
Environment environment,
@Nullable TimerService timerService,
Thread.UncaughtExceptionHandler uncaughtExceptionHandler,
StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor actionExecutor) {
this(environment, timerService, uncaughtExceptionHandler, actionExecutor, new TaskMailboxImpl(Thread.currentThread()));
}
protected StreamTask(
Environment environment,
@Nullable TimerService timerService,
Thread.UncaughtExceptionHandler uncaughtExceptionHandler,
StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor actionExecutor,
TaskMailbox mailbox) {
super(environment);
this.timerService = timerService;
this.uncaughtExceptionHandler = Preconditions.checkNotNull(uncaughtExceptionHandler);
this.configuration = new StreamConfig(getTaskConfiguration());
this.accumulatorMap = getEnvironment().getAccumulatorRegistry().getUserMap();
this.recordWriter = createRecordWriterDelegate(configuration, environment);
this.actionExecutor = Preconditions.checkNotNull(actionExecutor);
this.mailboxProcessor = new MailboxProcessor(this::processInput, mailbox, actionExecutor);
this.asyncExceptionHandler = new StreamTaskAsyncExceptionHandler(environment);
}
protected abstract void init() throws Exception;
protected void cancelTask() throws Exception {
}
protected void cleanup() throws Exception {
if (inputProcessor != null) {
inputProcessor.close();
}
}
/**
* This method implements the default action of the task (e.g. processing one event from the input). Implementations
* should (in general) be non-blocking.
*
* @param controller controller object for collaborative interaction between the action and the stream task.
* @throws Exception on any problems in the action.
*/
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
InputStatus status = inputProcessor.processInput();
if (status == InputStatus.MORE_AVAILABLE && recordWriter.isAvailable()) {
return;
}
if (status == InputStatus.END_OF_INPUT) {
controller.allActionsCompleted();
return;
}
CompletableFuture<?> jointFuture = getInputOutputJointFuture(status);
MailboxDefaultAction.Suspension suspendedDefaultAction = controller.suspendDefaultAction();
jointFuture.thenRun(suspendedDefaultAction::resume);
}
/**
* Considers three scenarios to combine input and output futures:
* 1. Both input and output are unavailable.
* 2. Only input is unavailable.
* 3. Only output is unavailable.
*/
private CompletableFuture<?> getInputOutputJointFuture(InputStatus status) {
if (status == InputStatus.NOTHING_AVAILABLE && !recordWriter.isAvailable()) {
return CompletableFuture.allOf(inputProcessor.getAvailableFuture(), recordWriter.getAvailableFuture());
} else if (status == InputStatus.NOTHING_AVAILABLE) {
return inputProcessor.getAvailableFuture();
} else {
return recordWriter.getAvailableFuture();
}
}
private void resetSynchronousSavepointId() {
syncSavepointId = null;
}
private void setSynchronousSavepointId(long checkpointId) {
Preconditions.checkState(
syncSavepointId == null, "at most one stop-with-savepoint checkpoint at a time is allowed");
syncSavepointId = checkpointId;
}
@VisibleForTesting
OptionalLong getSynchronousSavepointId() {
return syncSavepointId != null ? OptionalLong.of(syncSavepointId) : OptionalLong.empty();
}
private boolean isSynchronousSavepointId(long checkpointId) {
return syncSavepointId != null && syncSavepointId == checkpointId;
}
private void runSynchronousSavepointMailboxLoop() throws Exception {
assert syncSavepointId != null;
MailboxExecutor mailboxExecutor = mailboxProcessor.getMailboxExecutor(TaskMailbox.MAX_PRIORITY);
while (!canceled && syncSavepointId != null) {
mailboxExecutor.yield();
}
}
/**
* Emits the {@link org.apache.flink.streaming.api.watermark.Watermark
* so that all registered timers are fired.
*
	 * <p>This is used by the source task when the job is {@code TERMINATED}. In that case,
* we want all the timers registered throughout the pipeline to fire and the related
* state (e.g. windows) to be flushed.
*
* <p>For tasks other than the source task, this method does nothing.
*/
protected void advanceToEndOfEventTime() throws Exception {
}
/**
* Instructs the task to go through its normal termination routine, i.e. exit the run-loop
* and call {@link StreamOperator
*
* <p>This is used by the source task to get out of the run-loop when the job is stopped with a savepoint.
*
* <p>For tasks other than the source task, this method does nothing.
*/
protected void finishTask() throws Exception {
}
public StreamTaskStateInitializer createStreamTaskStateInitializer() {
return new StreamTaskStateInitializerImpl(
getEnvironment(),
stateBackend);
}
protected Counter setupNumRecordsInCounter(StreamOperator streamOperator) {
try {
return ((OperatorMetricGroup) streamOperator.getMetricGroup()).getIOMetricGroup().getNumRecordsInCounter();
} catch (Exception e) {
LOG.warn("An exception occurred during the metrics setup.", e);
return new SimpleCounter();
}
}
private void beforeInvoke() throws Exception {
disposedOperators = false;
LOG.debug("Initializing {}.", getName());
asyncOperationsThreadPool = Executors.newCachedThreadPool(new ExecutorThreadFactory("AsyncOperations", uncaughtExceptionHandler));
stateBackend = createStateBackend();
checkpointStorage = stateBackend.createCheckpointStorage(getEnvironment().getJobID());
if (timerService == null) {
ThreadFactory timerThreadFactory =
new DispatcherThreadFactory(TRIGGER_THREAD_GROUP, "Time Trigger for " + getName());
timerService = new SystemProcessingTimeService(
this::handleTimerException,
timerThreadFactory);
}
operatorChain = new OperatorChain<>(this, recordWriter);
headOperator = operatorChain.getHeadOperator();
init();
if (canceled) {
throw new CancelTaskException();
}
LOG.debug("Invoking {}", getName());
actionExecutor.runThrowing(() -> {
initializeStateAndOpen();
});
}
@Override
public final void invoke() throws Exception {
try {
beforeInvoke();
if (canceled) {
throw new CancelTaskException();
}
isRunning = true;
runMailboxLoop();
if (canceled) {
throw new CancelTaskException();
}
afterInvoke();
}
finally {
cleanUpInvoke();
}
}
private void afterInvoke() throws Exception {
LOG.debug("Finished task {}", getName());
actionExecutor.runThrowing(() -> {
closeAllOperators();
timerService.quiesce();
mailboxProcessor.prepareClose();
isRunning = false;
});
mailboxProcessor.drain();
timerService.awaitPendingAfterQuiesce();
LOG.debug("Closed operators for task {}", getName());
operatorChain.flushOutputs();
disposeAllOperators(false);
disposedOperators = true;
}
private void cleanUpInvoke() throws Exception {
isRunning = false;
setShouldInterruptOnCancel(false);
Thread.interrupted();
tryShutdownTimerService();
try {
cancelables.close();
shutdownAsyncThreads();
} catch (Throwable t) {
LOG.error("Could not shut down async checkpoint threads", t);
}
try {
cleanup();
} catch (Throwable t) {
LOG.error("Error during cleanup of stream task", t);
}
disposeAllOperators(true);
if (operatorChain != null) {
actionExecutor.run(() -> operatorChain.releaseOutputs());
} else {
recordWriter.close();
}
mailboxProcessor.close();
}
@Override
public final void cancel() throws Exception {
isRunning = false;
canceled = true;
try {
cancelTask();
}
finally {
mailboxProcessor.allActionsCompleted();
cancelables.close();
}
}
public MailboxExecutorFactory getMailboxExecutorFactory() {
return this.mailboxProcessor::getMailboxExecutor;
}
public final boolean isRunning() {
return isRunning;
}
public final boolean isCanceled() {
return canceled;
}
/**
* Execute {@link StreamOperator
* {@link StreamTask}. Closing happens from <b>head to tail</b> operator in the chain,
* contrary to {@link StreamOperator
* (see {@link
*/
private void closeAllOperators() throws Exception {
StreamOperator<?>[] allOperators = operatorChain.getAllOperators();
for (int i = allOperators.length - 1; i >= 0; i--) {
StreamOperator<?> operator = allOperators[i];
if (operator != null) {
operator.close();
}
if (i > 0) {
operatorChain.endNonHeadOperatorInput(allOperators[i - 1]);
}
}
}
private void shutdownAsyncThreads() throws Exception {
if (!asyncOperationsThreadPool.isShutdown()) {
asyncOperationsThreadPool.shutdownNow();
}
}
/**
	 * Execute {@link StreamOperator
|
class StreamTask<OUT, OP extends StreamOperator<OUT>>
extends AbstractInvokable
implements AsyncExceptionHandler {
/** The thread group that holds all trigger timer threads. */
public static final ThreadGroup TRIGGER_THREAD_GROUP = new ThreadGroup("Triggers");
/** The logger used by the StreamTask and its subclasses. */
protected static final Logger LOG = LoggerFactory.getLogger(StreamTask.class);
/**
* All actions outside of the task {@link
* to ensure that we don't have concurrent method calls that void consistent checkpoints.
* <p>CheckpointLock is superseded by {@link MailboxExecutor}, with
* {@link StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor SynchronizedStreamTaskActionExecutor}
* to provide lock to {@link SourceStreamTask} (will be pushed down later). </p>
* {@link StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor SynchronizedStreamTaskActionExecutor}
* will be replaced <b>here</b> with {@link StreamTaskActionExecutor} once {@link
*/
private final StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor actionExecutor;
/**
* The input processor. Initialized in {@link
*/
@Nullable
protected StreamInputProcessor inputProcessor;
/** the head operator that consumes the input streams of this task. */
protected OP headOperator;
/** The chain of operators executed by this task. */
protected OperatorChain<OUT, OP> operatorChain;
/** The configuration of this streaming task. */
protected final StreamConfig configuration;
/** Our state backend. We use this to create checkpoint streams and a keyed state backend. */
protected StateBackend stateBackend;
/** The external storage where checkpoint data is persisted. */
private CheckpointStorageWorkerView checkpointStorage;
/**
* The internal {@link TimerService} used to define the current
* processing time (default = {@code System.currentTimeMillis()}) and
* register timers for tasks to be executed in the future.
*/
protected TimerService timerService;
private final Thread.UncaughtExceptionHandler uncaughtExceptionHandler;
/** The map of user-defined accumulators of this task. */
private final Map<String, Accumulator<?, ?>> accumulatorMap;
/** The currently active background materialization threads. */
private final CloseableRegistry cancelables = new CloseableRegistry();
private final StreamTaskAsyncExceptionHandler asyncExceptionHandler;
/**
* Flag to mark the task "in operation", in which case check needs to be initialized to true,
* so that early cancel() before invoke() behaves correctly.
*/
private volatile boolean isRunning;
/** Flag to mark this task as canceled. */
private volatile boolean canceled;
private boolean disposedOperators;
/** Thread pool for async snapshot workers. */
private ExecutorService asyncOperationsThreadPool;
private final RecordWriterDelegate<SerializationDelegate<StreamRecord<OUT>>> recordWriter;
protected final MailboxProcessor mailboxProcessor;
private Long syncSavepointId = null;
/**
* Constructor for initialization, possibly with initial state (recovery / savepoint / etc).
*
* @param env The task environment for this task.
*/
protected StreamTask(Environment env) {
this(env, null);
}
/**
* Constructor for initialization, possibly with initial state (recovery / savepoint / etc).
*
* @param env The task environment for this task.
* @param timerService Optionally, a specific timer service to use.
*/
protected StreamTask(Environment env, @Nullable TimerService timerService) {
this(env, timerService, FatalExitExceptionHandler.INSTANCE);
}
protected StreamTask(
Environment environment,
@Nullable TimerService timerService,
Thread.UncaughtExceptionHandler uncaughtExceptionHandler) {
this(environment, timerService, uncaughtExceptionHandler, StreamTaskActionExecutor.synchronizedExecutor());
}
/**
* Constructor for initialization, possibly with initial state (recovery / savepoint / etc).
*
* <p>This constructor accepts a special {@link TimerService}. By default (and if
	 * null is passed for the timer service) a {@link SystemProcessingTimeService DefaultTimerService}
* will be used.
*
* @param environment The task environment for this task.
* @param timerService Optionally, a specific timer service to use.
* @param uncaughtExceptionHandler to handle uncaught exceptions in the async operations thread pool
	 * @param actionExecutor a means to wrap all actions performed by this task thread. Currently, only SynchronizedActionExecutor can be used to preserve locking semantics.
*/
protected StreamTask(
Environment environment,
@Nullable TimerService timerService,
Thread.UncaughtExceptionHandler uncaughtExceptionHandler,
StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor actionExecutor) {
this(environment, timerService, uncaughtExceptionHandler, actionExecutor, new TaskMailboxImpl(Thread.currentThread()));
}
protected StreamTask(
Environment environment,
@Nullable TimerService timerService,
Thread.UncaughtExceptionHandler uncaughtExceptionHandler,
StreamTaskActionExecutor.SynchronizedStreamTaskActionExecutor actionExecutor,
TaskMailbox mailbox) {
super(environment);
this.timerService = timerService;
this.uncaughtExceptionHandler = Preconditions.checkNotNull(uncaughtExceptionHandler);
this.configuration = new StreamConfig(getTaskConfiguration());
this.accumulatorMap = getEnvironment().getAccumulatorRegistry().getUserMap();
this.recordWriter = createRecordWriterDelegate(configuration, environment);
this.actionExecutor = Preconditions.checkNotNull(actionExecutor);
this.mailboxProcessor = new MailboxProcessor(this::processInput, mailbox, actionExecutor);
this.asyncExceptionHandler = new StreamTaskAsyncExceptionHandler(environment);
}
protected abstract void init() throws Exception;
protected void cancelTask() throws Exception {
}
protected void cleanup() throws Exception {
if (inputProcessor != null) {
inputProcessor.close();
}
}
/**
* This method implements the default action of the task (e.g. processing one event from the input). Implementations
* should (in general) be non-blocking.
*
* @param controller controller object for collaborative interaction between the action and the stream task.
* @throws Exception on any problems in the action.
*/
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
InputStatus status = inputProcessor.processInput();
if (status == InputStatus.MORE_AVAILABLE && recordWriter.isAvailable()) {
return;
}
if (status == InputStatus.END_OF_INPUT) {
controller.allActionsCompleted();
return;
}
CompletableFuture<?> jointFuture = getInputOutputJointFuture(status);
MailboxDefaultAction.Suspension suspendedDefaultAction = controller.suspendDefaultAction();
jointFuture.thenRun(suspendedDefaultAction::resume);
}
/**
* Considers three scenarios to combine input and output futures:
* 1. Both input and output are unavailable.
* 2. Only input is unavailable.
* 3. Only output is unavailable.
*/
private CompletableFuture<?> getInputOutputJointFuture(InputStatus status) {
if (status == InputStatus.NOTHING_AVAILABLE && !recordWriter.isAvailable()) {
return CompletableFuture.allOf(inputProcessor.getAvailableFuture(), recordWriter.getAvailableFuture());
} else if (status == InputStatus.NOTHING_AVAILABLE) {
return inputProcessor.getAvailableFuture();
} else {
return recordWriter.getAvailableFuture();
}
}
private void resetSynchronousSavepointId() {
syncSavepointId = null;
}
private void setSynchronousSavepointId(long checkpointId) {
Preconditions.checkState(
syncSavepointId == null, "at most one stop-with-savepoint checkpoint at a time is allowed");
syncSavepointId = checkpointId;
}
@VisibleForTesting
OptionalLong getSynchronousSavepointId() {
return syncSavepointId != null ? OptionalLong.of(syncSavepointId) : OptionalLong.empty();
}
private boolean isSynchronousSavepointId(long checkpointId) {
return syncSavepointId != null && syncSavepointId == checkpointId;
}
private void runSynchronousSavepointMailboxLoop() throws Exception {
assert syncSavepointId != null;
MailboxExecutor mailboxExecutor = mailboxProcessor.getMailboxExecutor(TaskMailbox.MAX_PRIORITY);
while (!canceled && syncSavepointId != null) {
mailboxExecutor.yield();
}
}
/**
* Emits the {@link org.apache.flink.streaming.api.watermark.Watermark
* so that all registered timers are fired.
*
	 * <p>This is used by the source task when the job is {@code TERMINATED}. In that case,
* we want all the timers registered throughout the pipeline to fire and the related
* state (e.g. windows) to be flushed.
*
* <p>For tasks other than the source task, this method does nothing.
*/
protected void advanceToEndOfEventTime() throws Exception {
}
/**
* Instructs the task to go through its normal termination routine, i.e. exit the run-loop
* and call {@link StreamOperator
*
* <p>This is used by the source task to get out of the run-loop when the job is stopped with a savepoint.
*
* <p>For tasks other than the source task, this method does nothing.
*/
protected void finishTask() throws Exception {
}
public StreamTaskStateInitializer createStreamTaskStateInitializer() {
return new StreamTaskStateInitializerImpl(
getEnvironment(),
stateBackend);
}
protected Counter setupNumRecordsInCounter(StreamOperator streamOperator) {
try {
return ((OperatorMetricGroup) streamOperator.getMetricGroup()).getIOMetricGroup().getNumRecordsInCounter();
} catch (Exception e) {
LOG.warn("An exception occurred during the metrics setup.", e);
return new SimpleCounter();
}
}
private void beforeInvoke() throws Exception {
disposedOperators = false;
LOG.debug("Initializing {}.", getName());
asyncOperationsThreadPool = Executors.newCachedThreadPool(new ExecutorThreadFactory("AsyncOperations", uncaughtExceptionHandler));
stateBackend = createStateBackend();
checkpointStorage = stateBackend.createCheckpointStorage(getEnvironment().getJobID());
if (timerService == null) {
ThreadFactory timerThreadFactory =
new DispatcherThreadFactory(TRIGGER_THREAD_GROUP, "Time Trigger for " + getName());
timerService = new SystemProcessingTimeService(
this::handleTimerException,
timerThreadFactory);
}
operatorChain = new OperatorChain<>(this, recordWriter);
headOperator = operatorChain.getHeadOperator();
init();
if (canceled) {
throw new CancelTaskException();
}
LOG.debug("Invoking {}", getName());
actionExecutor.runThrowing(() -> {
initializeStateAndOpen();
});
}
@Override
public final void invoke() throws Exception {
try {
beforeInvoke();
if (canceled) {
throw new CancelTaskException();
}
isRunning = true;
runMailboxLoop();
if (canceled) {
throw new CancelTaskException();
}
afterInvoke();
}
finally {
cleanUpInvoke();
}
}
private void afterInvoke() throws Exception {
LOG.debug("Finished task {}", getName());
actionExecutor.runThrowing(() -> {
closeAllOperators();
timerService.quiesce();
mailboxProcessor.prepareClose();
isRunning = false;
});
mailboxProcessor.drain();
timerService.awaitPendingAfterQuiesce();
LOG.debug("Closed operators for task {}", getName());
operatorChain.flushOutputs();
disposeAllOperators(false);
disposedOperators = true;
}
private void cleanUpInvoke() throws Exception {
isRunning = false;
setShouldInterruptOnCancel(false);
Thread.interrupted();
tryShutdownTimerService();
try {
cancelables.close();
shutdownAsyncThreads();
} catch (Throwable t) {
LOG.error("Could not shut down async checkpoint threads", t);
}
try {
cleanup();
} catch (Throwable t) {
LOG.error("Error during cleanup of stream task", t);
}
disposeAllOperators(true);
if (operatorChain != null) {
actionExecutor.run(() -> operatorChain.releaseOutputs());
} else {
recordWriter.close();
}
mailboxProcessor.close();
}
@Override
public final void cancel() throws Exception {
isRunning = false;
canceled = true;
try {
cancelTask();
}
finally {
mailboxProcessor.allActionsCompleted();
cancelables.close();
}
}
public MailboxExecutorFactory getMailboxExecutorFactory() {
return this.mailboxProcessor::getMailboxExecutor;
}
public final boolean isRunning() {
return isRunning;
}
public final boolean isCanceled() {
return canceled;
}
/**
* Execute {@link StreamOperator
* {@link StreamTask}. Closing happens from <b>head to tail</b> operator in the chain,
* contrary to {@link StreamOperator
* (see {@link
*/
private void closeAllOperators() throws Exception {
StreamOperator<?>[] allOperators = operatorChain.getAllOperators();
for (int i = allOperators.length - 1; i >= 0; i--) {
StreamOperator<?> operator = allOperators[i];
if (operator != null) {
operator.close();
}
if (i > 0) {
operatorChain.endNonHeadOperatorInput(allOperators[i - 1]);
}
}
}
private void shutdownAsyncThreads() throws Exception {
if (!asyncOperationsThreadPool.isShutdown()) {
asyncOperationsThreadPool.shutdownNow();
}
}
/**
	 * Execute {@link StreamOperator
|
We start by printing out the size. The for loop then iterates over all the mined opinions.
|
public static void main(String[] args) {
TextAnalyticsClient client = new TextAnalyticsClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildClient();
String document = "Bad atmosphere. Not close to plenty of restaurants, hotels, and transit! Staff are not friendly and helpful.";
System.out.printf("Text = %s%n", document);
AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true);
final DocumentSentiment documentSentiment = client.analyzeSentiment(document, "en", options);
SentimentConfidenceScores scores = documentSentiment.getConfidenceScores();
System.out.printf(
"Recognized document sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n",
documentSentiment.getSentiment(), scores.getPositive(), scores.getNeutral(), scores.getNegative());
List<MinedOpinion> positiveMinedOpinions = new ArrayList<>();
List<MinedOpinion> mixedMinedOpinions = new ArrayList<>();
List<MinedOpinion> negativeMinedOpinions = new ArrayList<>();
documentSentiment.getSentences().forEach(sentenceSentiment -> {
SentimentConfidenceScores sentenceScores = sentenceSentiment.getConfidenceScores();
System.out.printf("\tSentence sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n",
sentenceSentiment.getSentiment(), sentenceScores.getPositive(), sentenceScores.getNeutral(), sentenceScores.getNegative());
sentenceSentiment.getMinedOpinions().forEach(minedOpinion -> {
TextSentiment aspectTextSentiment = minedOpinion.getAspect().getSentiment();
if (NEGATIVE.equals(aspectTextSentiment)) {
negativeMinedOpinions.add(minedOpinion);
} else if (POSITIVE.equals(aspectTextSentiment)) {
positiveMinedOpinions.add(minedOpinion);
} else if (MIXED.equals(aspectTextSentiment)) {
mixedMinedOpinions.add(minedOpinion);
}
});
});
System.out.printf("Positive aspects count: %d%n", positiveMinedOpinions.size());
for (MinedOpinion positiveMinedOpinion : positiveMinedOpinions) {
System.out.printf("\tAspect: %s%n", positiveMinedOpinion.getAspect().getText());
for (OpinionSentiment opinionSentiment : positiveMinedOpinion.getOpinions()) {
System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the aspect negated: %s.%n",
opinionSentiment.getSentiment(), opinionSentiment.getText(), opinionSentiment.isNegated());
}
}
System.out.printf("Mixed aspects count: %d%n", mixedMinedOpinions.size());
for (MinedOpinion mixedMinedOpinion : mixedMinedOpinions) {
System.out.printf("\tAspect: %s%n", mixedMinedOpinion.getAspect().getText());
for (OpinionSentiment opinionSentiment : mixedMinedOpinion.getOpinions()) {
System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the aspect negated: %s.%n",
opinionSentiment.getSentiment(), opinionSentiment.getText(), opinionSentiment.isNegated());
}
}
System.out.printf("Negative aspects count: %d%n", negativeMinedOpinions.size());
for (MinedOpinion negativeMinedOpinion : negativeMinedOpinions) {
System.out.printf("\tAspect: %s%n", negativeMinedOpinion.getAspect().getText());
for (OpinionSentiment opinionSentiment : negativeMinedOpinion.getOpinions()) {
System.out.printf("\t\t'%s' sentiment because of \"%s\". Is the aspect negated: %s.%n",
opinionSentiment.getSentiment(), opinionSentiment.getText(), opinionSentiment.isNegated());
}
}
}
|
System.out.printf("Positive aspects count: %d%n", positiveMinedOpinions.size());
|
public static void main(String[] args) {
TextAnalyticsClient client = new TextAnalyticsClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("{endpoint}")
.buildClient();
String document = "Bad atmosphere. Not close to plenty of restaurants, hotels, and transit! Staff are not friendly and helpful.";
System.out.printf("Text = %s%n", document);
AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true);
final DocumentSentiment documentSentiment = client.analyzeSentiment(document, "en", options);
SentimentConfidenceScores scores = documentSentiment.getConfidenceScores();
System.out.printf(
"Recognized document sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n",
documentSentiment.getSentiment(), scores.getPositive(), scores.getNeutral(), scores.getNegative());
documentSentiment.getSentences().forEach(sentenceSentiment -> {
SentimentConfidenceScores sentenceScores = sentenceSentiment.getConfidenceScores();
System.out.printf("\tSentence sentiment: %s, positive score: %f, neutral score: %f, negative score: %f.%n",
sentenceSentiment.getSentiment(), sentenceScores.getPositive(), sentenceScores.getNeutral(), sentenceScores.getNegative());
sentenceSentiment.getMinedOpinions().forEach(minedOpinions -> {
AspectSentiment aspectSentiment = minedOpinions.getAspect();
System.out.printf("\t\tAspect sentiment: %s, aspect text: %s%n", aspectSentiment.getSentiment(),
aspectSentiment.getText());
for (OpinionSentiment opinionSentiment : minedOpinions.getOpinions()) {
System.out.printf("\t\t\t'%s' opinion sentiment because of \"%s\". Is the opinion negated: %s.%n",
opinionSentiment.getSentiment(), opinionSentiment.getText(), opinionSentiment.isNegated());
}
});
});
}
|
class AnalyzeSentimentWithOpinionMining {
/**
 * Main method to invoke this demo of how to analyze the sentiment of a document.
*
* @param args Unused arguments to the program.
*/
}
|
class AnalyzeSentimentWithOpinionMining {
/**
 * Main method to invoke this demo of how to analyze the sentiment of a document.
*
* @param args Unused arguments to the program.
*/
}
|
Hi @rsvoboda, I wonder if updating `QuarkusIntegrationTestExtension` is too early. It is fine as it is, but `DevServicesContext` has only been introduced in `main`, so removing the ability to support the injection of `QuarkusIntegrationTest.Context` is premature. I'm not sure how much it is used, but I can imagine users seeing their integration tests start failing with `2.3.x`; this just occurred to me this morning. I think adding a note to the 2.3 migration guide that it is deprecated is what is needed at this stage.
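To illustrate the kind of transition period I have in mind, here is a minimal, self-contained sketch (with illustrative stand-in types, not the real Quarkus classes) of an injector that keeps honoring the deprecated context field for one more release and only emits a deprecation warning instead of failing:
import java.lang.reflect.Field;

class DualContextInjector {
    // Stand-ins for DevServicesContext and the deprecated QuarkusIntegrationTest.Context.
    static class NewContext { }
    static class DeprecatedContext {
        final NewContext delegate;
        DeprecatedContext(NewContext delegate) { this.delegate = delegate; }
    }

    static void inject(Object testInstance) throws IllegalAccessException {
        NewContext ctx = new NewContext();
        for (Class<?> c = testInstance.getClass(); c != Object.class; c = c.getSuperclass()) {
            for (Field f : c.getDeclaredFields()) {
                if (f.getType() == NewContext.class) {
                    f.setAccessible(true);
                    f.set(testInstance, ctx);
                } else if (f.getType() == DeprecatedContext.class) {
                    // Keep the old injection point working, but point users at the migration guide.
                    System.err.println("Field '" + f.getName()
                            + "' uses a deprecated context type; see the 2.3 migration guide.");
                    f.setAccessible(true);
                    f.set(testInstance, new DeprecatedContext(ctx));
                }
            }
        }
    }
}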
|
private void injectTestContext(Object testInstance) {
Class<?> c = testInstance.getClass();
while (c != Object.class) {
for (Field f : c.getDeclaredFields()) {
if (f.getType().equals(DevServicesContext.class)) {
try {
f.setAccessible(true);
f.set(testInstance, createTestContext());
return;
} catch (Exception e) {
throw new RuntimeException("Unable to set field '" + f.getName()
+ "' with the proper test context", e);
}
} else if (DevServicesContext.ContextAware.class.isAssignableFrom(f.getType())) {
f.setAccessible(true);
try {
DevServicesContext.ContextAware val = (DevServicesContext.ContextAware) f.get(testInstance);
val.setIntegrationTestContext(createTestContext());
} catch (Exception e) {
throw new RuntimeException("Unable to inject context into field " + f.getName(), e);
}
}
}
c = c.getSuperclass();
}
}
|
} catch (Exception e) {
|
private void injectTestContext(Object testInstance) {
Class<?> c = testInstance.getClass();
while (c != Object.class) {
for (Field f : c.getDeclaredFields()) {
if (f.getType().equals(DevServicesContext.class)) {
try {
f.setAccessible(true);
f.set(testInstance, createTestContext());
return;
} catch (Exception e) {
throw new RuntimeException("Unable to set field '" + f.getName()
+ "' with the proper test context", e);
}
} else if (DevServicesContext.ContextAware.class.isAssignableFrom(f.getType())) {
f.setAccessible(true);
try {
DevServicesContext.ContextAware val = (DevServicesContext.ContextAware) f.get(testInstance);
val.setIntegrationTestContext(createTestContext());
} catch (Exception e) {
throw new RuntimeException("Unable to inject context into field " + f.getName(), e);
}
}
}
c = c.getSuperclass();
}
}
|
class and if we had or will have per-test test resources
boolean reloadTestResources = !Objects.equals(extensionContext.getRequiredTestClass(), currentJUnitTestClass)
&& (hasPerTestResources || QuarkusTestExtension.hasPerTestResources(extensionContext));
if ((state == null && !failedBoot) || wrongProfile || reloadTestResources) {
if (wrongProfile) {
if (state != null) {
try {
state.close();
} catch (Throwable throwable) {
throwable.printStackTrace();
}
}
}
try {
state = doProcessStart(quarkusArtifactProperties, selectedProfile, extensionContext);
store.put(IntegrationTestExtensionState.class.getName(), state);
} catch (Throwable e) {
failedBoot = true;
firstException = e;
}
}
|
class and if we had or will have per-test test resources
boolean reloadTestResources = !Objects.equals(extensionContext.getRequiredTestClass(), currentJUnitTestClass)
&& (hasPerTestResources || QuarkusTestExtension.hasPerTestResources(extensionContext));
if ((state == null && !failedBoot) || wrongProfile || reloadTestResources) {
if (wrongProfile) {
if (state != null) {
try {
state.close();
} catch (Throwable throwable) {
throwable.printStackTrace();
}
}
}
try {
state = doProcessStart(quarkusArtifactProperties, selectedProfile, extensionContext);
store.put(IntegrationTestExtensionState.class.getName(), state);
} catch (Throwable e) {
failedBoot = true;
firstException = e;
}
}
|
> I'm still not convinced about this test. It reimplements production code in many aspects, for example the exception handling (the ignoring of exceptions during cancellation/closing) and the threading model from Task/StreamTask, so any time we modify those, this test has a chance to fail. I guess "reimplementing production code" refers to processRecoveredBufferTask, which consumes the recovered buffers and verifies the data. This test actually only focuses on the RecoveredInputChannel#readRecoveredState method, so as long as the related interaction process is unchanged, the test stays valid regardless of how the upper component handles the CancelTaskException (ignores it or throws it). In general, unit tests should avoid touching additional components, since that makes them fragile to future changes. From this aspect, maybe the ITCase is more suitable for this scenario, as you said. But unit tests that exercise the respective methods directly still have value, since they have a better chance of covering all the potential cases.
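To make the exception-tolerance contract under discussion explicit, here is a generic, self-contained sketch of the pattern (generic names, not the Flink classes): a failure is swallowed only while the resource is known to be released, and otherwise surfaces as a test failure with the cause attached.
import java.util.concurrent.Callable;
import java.util.function.BooleanSupplier;

final class TolerantTask {
    // Wraps an action so that exceptions are acceptable only after a concurrent release.
    static Callable<Void> wrap(Runnable action, boolean verifyRelease, BooleanSupplier released) {
        return () -> {
            try {
                action.run();
            } catch (Throwable t) {
                if (!(verifyRelease && released.getAsBoolean())) {
                    throw new AssertionError("Exception is only expected after the resource was released", t);
                }
            }
            return null;
        };
    }
}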
|
private Callable<Void> readRecoveredStateTask(RecoveredInputChannel inputChannel, ChannelStateReader reader, boolean verifyRelease) {
return () -> {
try {
inputChannel.readRecoveredState(reader);
} catch (Throwable t) {
assertTrue("The expected exception should only happen in the case of released channel.", verifyRelease && inputChannel.isReleased());
}
return null;
};
}
|
assertTrue("The expected exception should only happen in the case of released channel.", verifyRelease && inputChannel.isReleased());
|
private Callable<Void> readRecoveredStateTask(RecoveredInputChannel inputChannel, ChannelStateReader reader, boolean verifyRelease) {
return () -> {
try {
inputChannel.readRecoveredState(reader);
} catch (Throwable t) {
if (!(verifyRelease && inputChannel.isReleased())) {
throw new AssertionError("Exceptions are expected here only if the input channel was released", t);
}
}
return null;
};
}
|
class RecoveredInputChannelTest {
private final boolean isRemote;
@Parameterized.Parameters(name = "isRemote = {0}")
public static Collection<Object[]> parameters() {
return Arrays.asList(new Object[][] {
{true},
{false},
});
}
public RecoveredInputChannelTest(boolean isRemote) {
this.isRemote = isRemote;
}
@Test
public void testConcurrentReadStateAndProcess() throws Exception {
testConcurrentReadStateAndProcess(isRemote);
}
@Test
public void testConcurrentReadStateAndRelease() throws Exception {
testConcurrentReadStateAndRelease(isRemote);
}
/**
* Tests that there are no potential deadlock and buffer leak issues while the following actions happen concurrently:
* 1. Task thread processes the recovered state buffer from RecoveredInputChannel.
* 2. Unspilling IO thread reads the recovered state and queues the buffer into RecoveredInputChannel.
* 3. Canceler thread closes the input gate and releases the RecoveredInputChannel.
*/
@Test
public void testConcurrentReadStateAndProcessAndRelease() throws Exception {
testConcurrentReadStateAndProcessAndRelease(isRemote);
}
/**
 * Tests that there are no buffer leaks while recovering the empty input channel state.
*/
@Test
public void testReadEmptyState() throws Exception {
testReadEmptyStateOrThrowException(isRemote, ChannelStateReader.NO_OP);
}
/**
 * Tests that there are no buffer leaks when an exception is thrown during state recovery.
*/
@Test(expected = IOException.class)
public void testReadStateWithException() throws Exception {
testReadEmptyStateOrThrowException(isRemote, new ChannelStateReaderWithException());
}
private void testReadEmptyStateOrThrowException(boolean isRemote, ChannelStateReader reader) throws Exception {
final int totalBuffers = 10;
final NetworkBufferPool globalPool = new NetworkBufferPool(totalBuffers, 32, 2);
final SingleInputGate inputGate = createInputGate(globalPool);
final RecoveredInputChannel inputChannel = createRecoveredChannel(isRemote, inputGate);
try {
inputGate.setInputChannels(inputChannel);
inputGate.setup();
inputChannel.readRecoveredState(reader);
assertEquals(1, inputChannel.getNumberOfQueuedBuffers());
assertFalse(inputChannel.getNextBuffer().isPresent());
assertTrue(inputChannel.getStateConsumedFuture().isDone());
} finally {
inputGate.close();
globalPool.destroyAllBufferPools();
assertEquals(totalBuffers, globalPool.getNumberOfAvailableMemorySegments());
globalPool.destroy();
}
}
/**
 * Tests that the process of reading recovered state executes concurrently with channel
 * buffer processing, under the condition that the total number of states exceeds the
 * total buffer amount, to confirm that the lifecycle (recycling) of exclusive/floating
 * buffers works well.
*/
private void testConcurrentReadStateAndProcess(boolean isRemote) throws Exception {
final int totalBuffers = 10;
final NetworkBufferPool globalPool = new NetworkBufferPool(totalBuffers, 32, 2);
final SingleInputGate inputGate = createInputGate(globalPool);
final RecoveredInputChannel inputChannel = createRecoveredChannel(isRemote, inputGate);
final int totalStates = 15;
final int[] states = {1, 2, 3, 4};
final ChannelStateReader reader = new ResultPartitionTest.FiniteChannelStateReader(totalStates, states);
final ExecutorService executor = Executors.newFixedThreadPool(2);
Throwable thrown = null;
try {
inputGate.setInputChannels(inputChannel);
inputGate.setup();
final Callable<Void> processTask = processRecoveredBufferTask(inputChannel, totalStates, states, false);
final Callable<Void> readStateTask = readRecoveredStateTask(inputChannel, reader, false);
submitTasksAndWaitForResults(executor, new Callable[] {readStateTask, processTask});
} catch (Throwable t) {
thrown = t;
} finally {
cleanup(globalPool, executor, null, thrown, inputChannel);
}
}
private void testConcurrentReadStateAndRelease(boolean isRemote) throws Exception {
final int totalBuffers = 10;
final NetworkBufferPool globalPool = new NetworkBufferPool(totalBuffers, 32, 2);
final SingleInputGate inputGate = createInputGate(globalPool);
final RecoveredInputChannel inputChannel = createRecoveredChannel(isRemote, inputGate);
final int totalStates = 15;
final int[] states = {1, 2, 3, 4};
final ChannelStateReader reader = new ResultPartitionTest.FiniteChannelStateReader(totalStates, states);
final ExecutorService executor = Executors.newFixedThreadPool(2);
Throwable thrown = null;
try {
inputGate.setInputChannels(inputChannel);
inputGate.setup();
submitTasksAndWaitForResults(
executor,
new Callable[] {readRecoveredStateTask(inputChannel, reader, true), releaseChannelTask(inputChannel)});
} catch (Throwable t) {
thrown = t;
} finally {
cleanup(globalPool, executor, null, thrown, inputChannel);
}
}
private void testConcurrentReadStateAndProcessAndRelease(boolean isRemote) throws Exception {
final int totalBuffers = 10;
final NetworkBufferPool globalPool = new NetworkBufferPool(totalBuffers, 32, 2);
final SingleInputGate inputGate = createInputGate(globalPool);
final RecoveredInputChannel inputChannel = createRecoveredChannel(isRemote, inputGate);
final int totalStates = 15;
final int[] states = {1, 2, 3, 4};
final ChannelStateReader reader = new ResultPartitionTest.FiniteChannelStateReader(totalStates, states);
final ExecutorService executor = Executors.newFixedThreadPool(2);
Throwable thrown = null;
try {
inputGate.setInputChannels(inputChannel);
inputGate.setup();
final Callable<Void> processTask = processRecoveredBufferTask(inputChannel, totalStates, states, true);
final Callable<Void> readStateTask = readRecoveredStateTask(inputChannel, reader, true);
final Callable<Void> releaseTask = releaseChannelTask(inputChannel);
submitTasksAndWaitForResults(executor, new Callable[] {readStateTask, processTask, releaseTask});
} catch (Throwable t) {
thrown = t;
} finally {
cleanup(globalPool, executor, null, thrown, inputChannel);
}
}
private Callable<Void> processRecoveredBufferTask(RecoveredInputChannel inputChannel, int totalStates, int[] states, boolean verifyRelease) {
return () -> {
int numProcessedStates = 0;
while (numProcessedStates < totalStates) {
if (verifyRelease && inputChannel.isReleased()) {
break;
}
if (inputChannel.getNumberOfQueuedBuffers() == 0) {
Thread.sleep(1);
continue;
}
try {
Optional<BufferAndAvailability> bufferAndAvailability = inputChannel.getNextBuffer();
if (bufferAndAvailability.isPresent()) {
Buffer buffer = bufferAndAvailability.get().buffer();
BufferBuilderAndConsumerTest.assertContent(buffer, null, states);
buffer.recycleBuffer();
numProcessedStates++;
}
} catch (Throwable t) {
assertTrue("The expected exception should only happen in the case of released channel.", verifyRelease && inputChannel.isReleased());
}
}
return null;
};
}
private Callable<Void> releaseChannelTask(RecoveredInputChannel inputChannel) {
return () -> {
inputChannel.releaseAllResources();
return null;
};
}
private RecoveredInputChannel createRecoveredChannel(boolean isRemote, SingleInputGate gate) {
if (isRemote) {
return new InputChannelBuilder().buildRemoteRecoveredChannel(gate);
} else {
return new InputChannelBuilder().buildLocalRecoveredChannel(gate);
}
}
private SingleInputGate createInputGate(NetworkBufferPool globalPool) throws Exception {
return new SingleInputGateBuilder()
.setBufferPoolFactory(globalPool.createBufferPool(8, 8))
.setSegmentProvider(globalPool)
.build();
}
}
|
class RecoveredInputChannelTest {
private final boolean isRemote;
@Parameterized.Parameters(name = "isRemote = {0}")
public static Collection<Object[]> parameters() {
return Arrays.asList(new Object[][] {
{true},
{false},
});
}
public RecoveredInputChannelTest(boolean isRemote) {
this.isRemote = isRemote;
}
@Test
public void testConcurrentReadStateAndProcess() throws Exception {
testConcurrentReadStateAndProcess(isRemote);
}
@Test
public void testConcurrentReadStateAndRelease() throws Exception {
testConcurrentReadStateAndRelease(isRemote);
}
/**
* Tests that there are no potential deadlock and buffer leak issues while the following actions happen concurrently:
* 1. Task thread processes the recovered state buffer from RecoveredInputChannel.
* 2. Unspilling IO thread reads the recovered state and queues the buffer into RecoveredInputChannel.
* 3. Canceler thread closes the input gate and releases the RecoveredInputChannel.
*/
@Test
public void testConcurrentReadStateAndProcessAndRelease() throws Exception {
testConcurrentReadStateAndProcessAndRelease(isRemote);
}
/**
 * Tests that there are no buffer leaks while recovering the empty input channel state.
*/
@Test
public void testReadEmptyState() throws Exception {
testReadEmptyStateOrThrowException(isRemote, ChannelStateReader.NO_OP);
}
/**
 * Tests that there are no buffer leaks when an exception is thrown during state recovery.
*/
@Test(expected = IOException.class)
public void testReadStateWithException() throws Exception {
testReadEmptyStateOrThrowException(isRemote, new ChannelStateReaderWithException());
}
private void testReadEmptyStateOrThrowException(boolean isRemote, ChannelStateReader reader) throws Exception {
final int totalBuffers = 10;
final NetworkBufferPool globalPool = new NetworkBufferPool(totalBuffers, 32, 2);
final SingleInputGate inputGate = createInputGate(globalPool);
final RecoveredInputChannel inputChannel = createRecoveredChannel(isRemote, inputGate);
try {
inputGate.setInputChannels(inputChannel);
inputGate.setup();
inputChannel.readRecoveredState(reader);
assertEquals(1, inputChannel.getNumberOfQueuedBuffers());
assertFalse(inputChannel.getNextBuffer().isPresent());
assertTrue(inputChannel.getStateConsumedFuture().isDone());
} finally {
inputGate.close();
globalPool.destroyAllBufferPools();
assertEquals(totalBuffers, globalPool.getNumberOfAvailableMemorySegments());
globalPool.destroy();
}
}
/**
 * Tests that the process of reading recovered state executes concurrently with channel
 * buffer processing, under the condition that the total number of states exceeds the
 * total buffer amount, to confirm that the lifecycle (recycling) of exclusive/floating
 * buffers works well.
*/
private void testConcurrentReadStateAndProcess(boolean isRemote) throws Exception {
final int totalBuffers = 10;
final NetworkBufferPool globalPool = new NetworkBufferPool(totalBuffers, 32, 2);
final SingleInputGate inputGate = createInputGate(globalPool);
final RecoveredInputChannel inputChannel = createRecoveredChannel(isRemote, inputGate);
final int totalStates = 15;
final int[] states = {1, 2, 3, 4};
final ChannelStateReader reader = new ResultPartitionTest.FiniteChannelStateReader(totalStates, states);
final ExecutorService executor = Executors.newFixedThreadPool(2);
Throwable thrown = null;
try {
inputGate.setInputChannels(inputChannel);
inputGate.setup();
final Callable<Void> processTask = processRecoveredBufferTask(inputChannel, totalStates, states, false);
final Callable<Void> readStateTask = readRecoveredStateTask(inputChannel, reader, false);
submitTasksAndWaitForResults(executor, new Callable[] {readStateTask, processTask});
} catch (Throwable t) {
thrown = t;
} finally {
cleanup(globalPool, executor, null, thrown, inputChannel);
}
}
private void testConcurrentReadStateAndRelease(boolean isRemote) throws Exception {
final int totalBuffers = 10;
final NetworkBufferPool globalPool = new NetworkBufferPool(totalBuffers, 32, 2);
final SingleInputGate inputGate = createInputGate(globalPool);
final RecoveredInputChannel inputChannel = createRecoveredChannel(isRemote, inputGate);
final int totalStates = 15;
final int[] states = {1, 2, 3, 4};
final ChannelStateReader reader = new ResultPartitionTest.FiniteChannelStateReader(totalStates, states);
final ExecutorService executor = Executors.newFixedThreadPool(2);
Throwable thrown = null;
try {
inputGate.setInputChannels(inputChannel);
inputGate.setup();
submitTasksAndWaitForResults(
executor,
new Callable[] {readRecoveredStateTask(inputChannel, reader, true), releaseChannelTask(inputChannel)});
} catch (Throwable t) {
thrown = t;
} finally {
cleanup(globalPool, executor, null, thrown, inputChannel);
}
}
private void testConcurrentReadStateAndProcessAndRelease(boolean isRemote) throws Exception {
final int totalBuffers = 10;
final NetworkBufferPool globalPool = new NetworkBufferPool(totalBuffers, 32, 2);
final SingleInputGate inputGate = createInputGate(globalPool);
final RecoveredInputChannel inputChannel = createRecoveredChannel(isRemote, inputGate);
final int totalStates = 15;
final int[] states = {1, 2, 3, 4};
final ChannelStateReader reader = new ResultPartitionTest.FiniteChannelStateReader(totalStates, states);
final ExecutorService executor = Executors.newFixedThreadPool(2);
Throwable thrown = null;
try {
inputGate.setInputChannels(inputChannel);
inputGate.setup();
final Callable<Void> processTask = processRecoveredBufferTask(inputChannel, totalStates, states, true);
final Callable<Void> readStateTask = readRecoveredStateTask(inputChannel, reader, true);
final Callable<Void> releaseTask = releaseChannelTask(inputChannel);
submitTasksAndWaitForResults(executor, new Callable[] {readStateTask, processTask, releaseTask});
} catch (Throwable t) {
thrown = t;
} finally {
cleanup(globalPool, executor, null, thrown, inputChannel);
}
}
private Callable<Void> processRecoveredBufferTask(RecoveredInputChannel inputChannel, int totalStates, int[] states, boolean verifyRelease) {
return () -> {
int numProcessedStates = 0;
while (numProcessedStates < totalStates) {
if (verifyRelease && inputChannel.isReleased()) {
break;
}
if (inputChannel.getNumberOfQueuedBuffers() == 0) {
Thread.sleep(1);
continue;
}
try {
Optional<BufferAndAvailability> bufferAndAvailability = inputChannel.getNextBuffer();
if (bufferAndAvailability.isPresent()) {
Buffer buffer = bufferAndAvailability.get().buffer();
BufferBuilderAndConsumerTest.assertContent(buffer, null, states);
buffer.recycleBuffer();
numProcessedStates++;
}
} catch (Throwable t) {
if (!(verifyRelease && inputChannel.isReleased())) {
throw new AssertionError("Exceptions are expected here only if the input channel was released", t);
}
}
}
return null;
};
}
private Callable<Void> releaseChannelTask(RecoveredInputChannel inputChannel) {
return () -> {
inputChannel.releaseAllResources();
return null;
};
}
private RecoveredInputChannel createRecoveredChannel(boolean isRemote, SingleInputGate gate) {
if (isRemote) {
return new InputChannelBuilder().buildRemoteRecoveredChannel(gate);
} else {
return new InputChannelBuilder().buildLocalRecoveredChannel(gate);
}
}
private SingleInputGate createInputGate(NetworkBufferPool globalPool) throws Exception {
return new SingleInputGateBuilder()
.setBufferPoolFactory(globalPool.createBufferPool(8, 8))
.setSegmentProvider(globalPool)
.build();
}
}
|
Can we assert the exception message here?
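For example (a sketch: TestNG's Assert.expectThrows returns the caught exception so its message can be inspected; the expected message fragment below is an assumption that would need to match what readBuildJson actually throws):
public void testReadBuildJsonForNonExistingBuildFile() {
    Path buildFilePath = PROJECT_UTILS_RESOURCES.resolve("xyz").resolve(ProjectConstants.BUILD_FILE);
    IOException e = Assert.expectThrows(IOException.class, () -> ProjectUtils.readBuildJson(buildFilePath));
    // Hypothetical message fragment; align with the real exception text.
    Assert.assertTrue(e.getMessage().contains(ProjectConstants.BUILD_FILE));
}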
|
public void testReadBuildJsonForNonExistingBuildFile() {
Path buildFilePath = PROJECT_UTILS_RESOURCES.resolve("xyz").resolve(ProjectConstants.BUILD_FILE);
Assert.assertThrows(IOException.class, () -> {
ProjectUtils.readBuildJson(buildFilePath);
});
}
|
Path buildFilePath = PROJECT_UTILS_RESOURCES.resolve("xyz").resolve(ProjectConstants.BUILD_FILE);
|
public void testReadBuildJsonForNonExistingBuildFile() {
Path buildFilePath = PROJECT_UTILS_RESOURCES.resolve("xyz").resolve(ProjectConstants.BUILD_FILE);
Assert.assertThrows(IOException.class, () -> {
ProjectUtils.readBuildJson(buildFilePath);
});
}
|
class ProjectUtilsTests {
private static final Path RESOURCE_DIRECTORY = Paths.get("src", "test", "resources");
private static final Path PROJECT_UTILS_RESOURCES = RESOURCE_DIRECTORY.resolve("project-utils");
private static Path tempDirectory;
private static BuildJson buildJson;
@BeforeClass
public void setUp() throws IOException {
tempDirectory = Files.createTempDirectory("b7a-project-utils-test-" + System.nanoTime());
buildJson = new BuildJson(1629359520, 1629259520);
}
@Test
public void testReadBuildJson() {
Path buildFilePath = PROJECT_UTILS_RESOURCES.resolve(ProjectConstants.BUILD_FILE);
try {
BuildJson buildJson = ProjectUtils.readBuildJson(buildFilePath);
Assert.assertEquals(buildJson.lastBuildTime(), 1629359520);
Assert.assertEquals(buildJson.lastUpdateTime(), 1629259520);
} catch (Exception e) {
Assert.fail("Reading Build Json failed");
}
}
@Test
public void testReadBuildJsonForInvalidBuildFile() {
Path buildFilePath = PROJECT_UTILS_RESOURCES.resolve("invalid-build");
Assert.assertThrows(JsonSyntaxException.class, () -> {
ProjectUtils.readBuildJson(buildFilePath);
});
}
@Test
public void testWriteBuildFile() throws IOException {
Path buildFilePath = tempDirectory.resolve(ProjectConstants.BUILD_FILE);
Files.createFile(buildFilePath);
ProjectUtils.writeBuildFile(buildFilePath, buildJson);
Assert.assertTrue(buildFilePath.toFile().exists());
BuildJson resBuildJson = ProjectUtils.readBuildJson(buildFilePath);
Assert.assertEquals(resBuildJson.lastBuildTime(), 1629359520);
Assert.assertEquals(resBuildJson.lastUpdateTime(), 1629259520);
}
@Test(dependsOnMethods = "testWriteBuildFile",
expectedExceptions = ProjectException.class,
expectedExceptionsMessageRegExp = "'build' file does not have write permissions")
public void testWriteBuildFileForNonExistingPath() throws IOException {
Path buildFilePath = tempDirectory.resolve(ProjectConstants.BUILD_FILE);
new File(String.valueOf(buildFilePath)).setWritable(false);
ProjectUtils.writeBuildFile(buildFilePath, buildJson);
}
}
|
class ProjectUtilsTests {
private static final Path RESOURCE_DIRECTORY = Paths.get("src", "test", "resources");
private static final Path PROJECT_UTILS_RESOURCES = RESOURCE_DIRECTORY.resolve("project-utils");
private static Path tempDirectory;
private static BuildJson buildJson;
@BeforeClass
public void setUp() throws IOException {
tempDirectory = Files.createTempDirectory("b7a-project-utils-test-" + System.nanoTime());
buildJson = new BuildJson(1629359520, 1629259520);
}
@Test
public void testReadBuildJson() {
Path buildFilePath = PROJECT_UTILS_RESOURCES.resolve(ProjectConstants.BUILD_FILE);
try {
BuildJson buildJson = ProjectUtils.readBuildJson(buildFilePath);
Assert.assertEquals(buildJson.lastBuildTime(), 1629359520);
Assert.assertEquals(buildJson.lastUpdateTime(), 1629259520);
} catch (Exception e) {
Assert.fail("Reading Build Json failed");
}
}
@Test
public void testReadBuildJsonForInvalidBuildFile() {
Path buildFilePath = PROJECT_UTILS_RESOURCES.resolve("invalid-build");
Assert.assertThrows(JsonSyntaxException.class, () -> {
ProjectUtils.readBuildJson(buildFilePath);
});
}
}
|
+1. We can look up the parents of the `tokenAtCursor` and check if there's a class/object as a parent. Probably move that logic to a separate method.
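A sketch of that separate method, assuming the syntax tree API where every node exposes parent() and kind(); the two SyntaxKind values checked here are assumptions about which kinds represent class/object constructs:
// Walks the ancestors of the token at the cursor and reports whether any of
// them is a class or object construct (kinds checked are assumptions).
private static boolean hasClassOrObjectParent(Token tokenAtCursor) {
    NonTerminalNode parent = tokenAtCursor.parent();
    while (parent != null) {
        if (parent.kind() == SyntaxKind.CLASS_DEFINITION || parent.kind() == SyntaxKind.OBJECT_TYPE_DESC) {
            return true;
        }
        parent = parent.parent();
    }
    return false;
}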
|
public static Optional<Range> prepareRename(PrepareRenameContext context) {
fillTokenInfoAtCursor(context);
context.checkCancelled();
Token tokenAtCursor = TokensUtil.findTokenAtPosition(context, context.getCursorPosition())
.filter(token -> token instanceof IdentifierToken)
.or(() -> {
Position originalPos = context.getCursorPosition();
Position newPos = new Position(originalPos.getLine(), originalPos.getCharacter() - 1);
return TokensUtil.findTokenAtPosition(context, newPos);
})
.orElse(null);
if (!(tokenAtCursor instanceof IdentifierToken) || CommonUtil.isKeyword(tokenAtCursor.text())
|| CommonUtil.SELF_KW.equals(tokenAtCursor.text())) {
return Optional.empty();
}
Optional<Document> document = context.currentDocument();
if (document.isEmpty()) {
return Optional.empty();
}
Range cursorPosRange = new Range(context.getCursorPosition(), context.getCursorPosition());
NonTerminalNode nodeAtCursor = CommonUtil.findNode(cursorPosRange, document.get().syntaxTree());
if (onImportDeclarationNode(context, nodeAtCursor)) {
return Optional.empty();
}
return Optional.of(CommonUtil.toRange(tokenAtCursor.lineRange()));
}
|
|| CommonUtil.SELF_KW.equals(tokenAtCursor.text())) {
|
public static Optional<Range> prepareRename(PrepareRenameContext context) {
fillTokenInfoAtCursor(context);
context.checkCancelled();
Token tokenAtCursor = TokensUtil.findTokenAtPosition(context, context.getCursorPosition())
.filter(token -> token instanceof IdentifierToken)
.or(() -> {
Position originalPos = context.getCursorPosition();
Position newPos = new Position(originalPos.getLine(), originalPos.getCharacter() - 1);
return TokensUtil.findTokenAtPosition(context, newPos);
})
.orElse(null);
if (!(tokenAtCursor instanceof IdentifierToken) || CommonUtil.isKeyword(tokenAtCursor.text())
|| isSelfClassSymbol(context)) {
return Optional.empty();
}
Optional<Document> document = context.currentDocument();
if (document.isEmpty()) {
return Optional.empty();
}
Range cursorPosRange = new Range(context.getCursorPosition(), context.getCursorPosition());
NonTerminalNode nodeAtCursor = CommonUtil.findNode(cursorPosRange, document.get().syntaxTree());
if (onImportDeclarationNode(context, nodeAtCursor)) {
return Optional.empty();
}
return Optional.of(CommonUtil.toRange(tokenAtCursor.lineRange()));
}
|
class RenameUtil {
private RenameUtil() {
}
/**
* Check if the provided position is valid for renaming. If valid, returns a valid range. Empty otherwise.
*
* @param context Reference context
* @return A range if position is valid for rename
*/
private static Map<String, List<TextEdit>> getChanges(
RenameContext context) {
fillTokenInfoAtCursor(context);
String newName = context.getParams().getNewName();
if (!CommonUtil.isValidIdentifier(newName)) {
throw new UserErrorException("Invalid identifier provided");
}
Optional<Document> document = context.currentDocument();
if (document.isEmpty()) {
return Collections.emptyMap();
}
Range cursorPosRange = new Range(context.getCursorPosition(), context.getCursorPosition());
NonTerminalNode nodeAtCursor = CommonUtil.findNode(cursorPosRange, document.get().syntaxTree());
if (onImportDeclarationNode(context, nodeAtCursor)) {
return Collections.emptyMap();
}
if (nodeAtCursor.kind() == SyntaxKind.SIMPLE_NAME_REFERENCE) {
String tokenAtCursorName = ((SimpleNameReferenceNode) nodeAtCursor).name().text();
if (CommonUtil.SELF_KW.equals(tokenAtCursorName)) {
return Collections.emptyMap();
}
}
if (QNameReferenceUtil.onModulePrefix(context, nodeAtCursor)) {
return handleQNameReferenceRename(context, document.get(), nodeAtCursor);
}
if (onImportPrefixNode(context, nodeAtCursor)) {
return handleImportPrefixRename(context, document.get(), nodeAtCursor);
}
Map<Module, List<Location>> locationMap = ReferencesUtil.getReferences(context);
Map<String, List<TextEdit>> changes = new HashMap<>();
for (Map.Entry<Module, List<Location>> entry : locationMap.entrySet()) {
Module module = entry.getKey();
List<Location> locations = entry.getValue();
for (Location location : locations) {
String uri = ReferencesUtil.getUriFromLocation(module, location);
List<TextEdit> textEdits = changes.computeIfAbsent(uri, k -> new ArrayList<>());
Range editRange = ReferencesUtil.getRange(location);
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(newName)) {
String escapedNewName = CommonUtil.escapeReservedKeyword(newName);
textEdits.add(new AnnotatedTextEdit(editRange,
escapedNewName, RenameChangeAnnotation.QUOTED_KEYWORD.getID()));
textEdits.add(new AnnotatedTextEdit(editRange,
newName, RenameChangeAnnotation.UNQUOTED_KEYWORD.getID()));
} else {
textEdits.add(new TextEdit(editRange, newName));
}
}
}
return changes;
}
/**
* @param context Context
* @return {@link WorkspaceEdit} Workspace edit of changes.
*/
public static WorkspaceEdit rename(RenameContext context) {
Map<String, ChangeAnnotation> changeAnnotationMap = new HashMap<>();
WorkspaceEdit workspaceEdit = new WorkspaceEdit();
Map<String, List<TextEdit>> changes = getChanges(context);
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(context.getParams().getNewName())) {
changeAnnotationMap.put(RenameChangeAnnotation.QUOTED_KEYWORD.getID(),
RenameChangeAnnotation.QUOTED_KEYWORD.getChangeAnnotation());
changeAnnotationMap.put(RenameChangeAnnotation.UNQUOTED_KEYWORD.getID(),
RenameChangeAnnotation.UNQUOTED_KEYWORD.getChangeAnnotation());
List<Either<TextDocumentEdit, ResourceOperation>> docEdits = new ArrayList<>();
changes.entrySet().forEach(entry -> {
TextDocumentEdit edit = new TextDocumentEdit();
edit.setTextDocument(new VersionedTextDocumentIdentifier(entry.getKey(), null));
edit.setEdits(entry.getValue());
docEdits.add(Either.forLeft(edit));
});
workspaceEdit.setDocumentChanges(docEdits);
workspaceEdit.setChangeAnnotations(changeAnnotationMap);
} else {
workspaceEdit.setChanges(changes);
}
return workspaceEdit;
}
private static boolean onImportPrefixNode(ReferencesContext context, NonTerminalNode node) {
if (node.kind() != SyntaxKind.IMPORT_PREFIX) {
return false;
}
ImportPrefixNode importPrefixNode = (ImportPrefixNode) node;
int cursor = context.getCursorPositionInTree();
return importPrefixNode.prefix().textRange().startOffset() <= cursor &&
cursor <= importPrefixNode.prefix().textRange().endOffset();
}
private static boolean onImportDeclarationNode(ReferencesContext context, NonTerminalNode node) {
while (node != null && node.kind() != SyntaxKind.IMPORT_DECLARATION) {
node = node.parent();
}
if (node == null) {
return false;
}
ImportDeclarationNode importDeclarationNode = (ImportDeclarationNode) node;
int cursor = context.getCursorPositionInTree();
SeparatedNodeList<IdentifierToken> moduleNames = importDeclarationNode.moduleName();
int startOffset;
if (importDeclarationNode.orgName().isPresent()) {
startOffset = importDeclarationNode.orgName().get().textRange().startOffset();
} else if (!moduleNames.isEmpty()) {
startOffset = moduleNames.get(0).textRange().startOffset();
} else {
return false;
}
return !moduleNames.isEmpty() && startOffset <= cursor &&
cursor <= moduleNames.get(moduleNames.size() - 1).textRange().endOffset();
}
private static Map<String, List<TextEdit>> handleQNameReferenceRename(RenameContext context,
Document document,
NonTerminalNode nodeAtCursor) {
QualifiedNameReferenceNode qNameRefNode = (QualifiedNameReferenceNode) nodeAtCursor;
String moduleOrAlias = qNameRefNode.modulePrefix().text();
ModulePartNode modulePartNode = document.syntaxTree().rootNode();
Optional<ImportDeclarationNode> importDeclarationNode = modulePartNode.imports().stream()
.filter(importDeclaration -> {
CodeActionModuleId moduleId = CodeActionModuleId.from(importDeclaration);
if (!StringUtils.isEmpty(moduleId.modulePrefix())) {
return moduleId.modulePrefix().equals(moduleOrAlias);
}
return moduleId.moduleName().endsWith(moduleOrAlias);
})
.findFirst();
if (importDeclarationNode.isEmpty()) {
return Collections.emptyMap();
}
return handleImportDeclarationRename(context, document, importDeclarationNode.get());
}
private static Map<String, List<TextEdit>> handleImportDeclarationRename(RenameContext context,
Document document,
ImportDeclarationNode importDeclaration) {
Map<Module, List<Location>> locationMap = ReferencesUtil.getReferences(context);
Map<String, List<TextEdit>> changes = new HashMap<>();
String newName = context.getParams().getNewName();
locationMap.entrySet().stream()
.filter(moduleListEntry -> moduleListEntry.getKey().moduleId().equals(document.documentId().moduleId()))
.forEach(moduleLocations -> {
Module module = moduleLocations.getKey();
List<Location> locations = moduleLocations.getValue();
locations.forEach(location -> {
String fileUri = ReferencesUtil.getUriFromLocation(module, location);
if (!context.fileUri().equals(fileUri)) {
return;
}
Range editRange = ReferencesUtil.getRange(location);
if (CommonUtil.isWithinLineRange(location.lineRange(), importDeclaration.lineRange()) &&
importDeclaration.prefix().isEmpty()) {
SeparatedNodeList<IdentifierToken> moduleNames = importDeclaration.moduleName();
LinePosition endPos = moduleNames.get(moduleNames.size() - 1).lineRange().endLine();
Range range = new Range(CommonUtil.toPosition(endPos), CommonUtil.toPosition(endPos));
List<TextEdit> textEdits = changes.computeIfAbsent(fileUri, k -> new ArrayList<>());
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(newName)) {
String escapedNewName = CommonUtil.escapeReservedKeyword(newName);
textEdits.add(new AnnotatedTextEdit(editRange,
"as " + escapedNewName, RenameChangeAnnotation.QUOTED_KEYWORD.getID()));
textEdits.add(new AnnotatedTextEdit(editRange,
"as " + newName, RenameChangeAnnotation.UNQUOTED_KEYWORD.getID()));
} else {
textEdits.add(new TextEdit(range, " as " + newName));
}
} else {
List<TextEdit> textEdits = changes.computeIfAbsent(fileUri, k -> new ArrayList<>());
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(newName)) {
String escapedNewName = CommonUtil.escapeReservedKeyword(newName);
textEdits.add(new AnnotatedTextEdit(editRange, escapedNewName,
RenameChangeAnnotation.QUOTED_KEYWORD.getID()));
textEdits.add(new AnnotatedTextEdit(editRange, newName,
RenameChangeAnnotation.UNQUOTED_KEYWORD.getID()));
} else {
textEdits.add(new TextEdit(ReferencesUtil.getRange(location), newName));
}
}
});
});
return changes;
}
private static Map<String, List<TextEdit>> handleImportPrefixRename(
RenameContext context,
Document document,
NonTerminalNode nodeAtCursor) {
String newName = context.getParams().getNewName();
Map<Module, List<Location>> locationMap = ReferencesUtil.getReferences(context);
Map<String, List<TextEdit>> changes = new HashMap<>();
locationMap.entrySet().stream()
.filter(moduleListEntry -> moduleListEntry.getKey().moduleId().equals(document.documentId().moduleId()))
.forEach(moduleLocations -> {
Module module = moduleLocations.getKey();
List<Location> locations = moduleLocations.getValue();
locations.forEach(location -> {
String fileUri = ReferencesUtil.getUriFromLocation(module, location);
if (!context.fileUri().equals(fileUri)) {
return;
}
List<TextEdit> textEdits = changes.computeIfAbsent(fileUri, k -> new ArrayList<>());
Range editRange = ReferencesUtil.getRange(location);
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(newName)) {
String escapedNewName = CommonUtil.escapeModuleName(newName);
textEdits.add(new AnnotatedTextEdit(editRange,
escapedNewName, RenameChangeAnnotation.QUOTED_KEYWORD.getID()));
textEdits.add(new AnnotatedTextEdit(editRange,
newName, RenameChangeAnnotation.UNQUOTED_KEYWORD.getID()));
} else {
textEdits.add(new TextEdit(editRange, newName));
}
});
});
return changes;
}
private static void fillTokenInfoAtCursor(ReferencesContext context) {
Optional<Document> document = context.currentDocument();
if (document.isEmpty()) {
throw new RuntimeException("Could not find a valid document");
}
TextDocument textDocument = document.get().textDocument();
Position position = context.getCursorPosition();
int txtPos = textDocument.textPositionFrom(LinePosition.from(position.getLine(), position.getCharacter()));
context.setCursorPositionInTree(txtPos);
}
private enum RenameChangeAnnotation {
QUOTED_KEYWORD("Quoted Rename", "Rename to keyword with a quote", true, "quoted"),
UNQUOTED_KEYWORD("Un-quoted Rename", "Rename to keyword without a quote", true, "unquoted");
private String id;
private String label;
private String description;
private Boolean needsConfirmation;
RenameChangeAnnotation(String label, String description, Boolean needsConfirmation, String id) {
this.id = id;
this.label = label;
this.description = description;
this.needsConfirmation = needsConfirmation;
}
public String getID() {
return this.id;
}
public ChangeAnnotation getChangeAnnotation() {
ChangeAnnotation changeAnnotation = new ChangeAnnotation(this.label);
changeAnnotation.setDescription(this.description);
changeAnnotation.setNeedsConfirmation(this.needsConfirmation);
return changeAnnotation;
}
}
}
|
class RenameUtil {
private RenameUtil() {
}
/**
* Check if the provided position is valid for renaming. If valid, returns a valid range. Empty otherwise.
*
* @param context Reference context
* @return A range if position is valid for rename
*/
private static Map<String, List<TextEdit>> getChanges(
RenameContext context) {
fillTokenInfoAtCursor(context);
String newName = context.getParams().getNewName();
if (!CommonUtil.isValidIdentifier(newName)) {
throw new UserErrorException("Invalid identifier provided");
}
Optional<Document> document = context.currentDocument();
if (document.isEmpty()) {
return Collections.emptyMap();
}
Range cursorPosRange = new Range(context.getCursorPosition(), context.getCursorPosition());
NonTerminalNode nodeAtCursor = CommonUtil.findNode(cursorPosRange, document.get().syntaxTree());
if (onImportDeclarationNode(context, nodeAtCursor)
|| (nodeAtCursor.kind() == SyntaxKind.SIMPLE_NAME_REFERENCE && isSelfClassSymbol(context))) {
return Collections.emptyMap();
}
if (QNameReferenceUtil.onModulePrefix(context, nodeAtCursor)) {
return handleQNameReferenceRename(context, document.get(), nodeAtCursor);
}
if (onImportPrefixNode(context, nodeAtCursor)) {
return handleImportPrefixRename(context, document.get(), nodeAtCursor);
}
Map<Module, List<Location>> locationMap = ReferencesUtil.getReferences(context);
Map<String, List<TextEdit>> changes = new HashMap<>();
for (Map.Entry<Module, List<Location>> entry : locationMap.entrySet()) {
Module module = entry.getKey();
List<Location> locations = entry.getValue();
for (Location location : locations) {
String uri = ReferencesUtil.getUriFromLocation(module, location);
List<TextEdit> textEdits = changes.computeIfAbsent(uri, k -> new ArrayList<>());
Range editRange = ReferencesUtil.getRange(location);
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(newName)) {
String escapedNewName = CommonUtil.escapeReservedKeyword(newName);
textEdits.add(new AnnotatedTextEdit(editRange,
escapedNewName, RenameChangeAnnotation.QUOTED_KEYWORD.getID()));
textEdits.add(new AnnotatedTextEdit(editRange,
newName, RenameChangeAnnotation.UNQUOTED_KEYWORD.getID()));
} else {
textEdits.add(new TextEdit(editRange, newName));
}
}
}
return changes;
}
/**
* @param context Context
* @return {@link WorkspaceEdit} Workspace edit of changes.
*/
public static WorkspaceEdit rename(RenameContext context) {
Map<String, ChangeAnnotation> changeAnnotationMap = new HashMap<>();
WorkspaceEdit workspaceEdit = new WorkspaceEdit();
Map<String, List<TextEdit>> changes = getChanges(context);
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(context.getParams().getNewName())) {
changeAnnotationMap.put(RenameChangeAnnotation.QUOTED_KEYWORD.getID(),
RenameChangeAnnotation.QUOTED_KEYWORD.getChangeAnnotation());
changeAnnotationMap.put(RenameChangeAnnotation.UNQUOTED_KEYWORD.getID(),
RenameChangeAnnotation.UNQUOTED_KEYWORD.getChangeAnnotation());
List<Either<TextDocumentEdit, ResourceOperation>> docEdits = new ArrayList<>();
changes.entrySet().forEach(entry -> {
TextDocumentEdit edit = new TextDocumentEdit();
edit.setTextDocument(new VersionedTextDocumentIdentifier(entry.getKey(), null));
edit.setEdits(entry.getValue());
docEdits.add(Either.forLeft(edit));
});
workspaceEdit.setDocumentChanges(docEdits);
workspaceEdit.setChangeAnnotations(changeAnnotationMap);
} else {
workspaceEdit.setChanges(changes);
}
return workspaceEdit;
}
private static boolean onImportPrefixNode(ReferencesContext context, NonTerminalNode node) {
if (node.kind() != SyntaxKind.IMPORT_PREFIX) {
return false;
}
ImportPrefixNode importPrefixNode = (ImportPrefixNode) node;
int cursor = context.getCursorPositionInTree();
return importPrefixNode.prefix().textRange().startOffset() <= cursor &&
cursor <= importPrefixNode.prefix().textRange().endOffset();
}
private static boolean onImportDeclarationNode(ReferencesContext context, NonTerminalNode node) {
while (node != null && node.kind() != SyntaxKind.IMPORT_DECLARATION) {
node = node.parent();
}
if (node == null) {
return false;
}
ImportDeclarationNode importDeclarationNode = (ImportDeclarationNode) node;
int cursor = context.getCursorPositionInTree();
SeparatedNodeList<IdentifierToken> moduleNames = importDeclarationNode.moduleName();
int startOffset;
if (importDeclarationNode.orgName().isPresent()) {
startOffset = importDeclarationNode.orgName().get().textRange().startOffset();
} else if (!moduleNames.isEmpty()) {
startOffset = moduleNames.get(0).textRange().startOffset();
} else {
return false;
}
return !moduleNames.isEmpty() && startOffset <= cursor &&
cursor <= moduleNames.get(moduleNames.size() - 1).textRange().endOffset();
}
private static Map<String, List<TextEdit>> handleQNameReferenceRename(RenameContext context,
Document document,
NonTerminalNode nodeAtCursor) {
QualifiedNameReferenceNode qNameRefNode = (QualifiedNameReferenceNode) nodeAtCursor;
String moduleOrAlias = qNameRefNode.modulePrefix().text();
ModulePartNode modulePartNode = document.syntaxTree().rootNode();
Optional<ImportDeclarationNode> importDeclarationNode = modulePartNode.imports().stream()
.filter(importDeclaration -> {
CodeActionModuleId moduleId = CodeActionModuleId.from(importDeclaration);
if (!StringUtils.isEmpty(moduleId.modulePrefix())) {
return moduleId.modulePrefix().equals(moduleOrAlias);
}
return moduleId.moduleName().endsWith(moduleOrAlias);
})
.findFirst();
if (importDeclarationNode.isEmpty()) {
return Collections.emptyMap();
}
return handleImportDeclarationRename(context, document, importDeclarationNode.get());
}
private static Map<String, List<TextEdit>> handleImportDeclarationRename(RenameContext context,
Document document,
ImportDeclarationNode importDeclaration) {
Map<Module, List<Location>> locationMap = ReferencesUtil.getReferences(context);
Map<String, List<TextEdit>> changes = new HashMap<>();
String newName = context.getParams().getNewName();
locationMap.entrySet().stream()
.filter(moduleListEntry -> moduleListEntry.getKey().moduleId().equals(document.documentId().moduleId()))
.forEach(moduleLocations -> {
Module module = moduleLocations.getKey();
List<Location> locations = moduleLocations.getValue();
locations.forEach(location -> {
String fileUri = ReferencesUtil.getUriFromLocation(module, location);
if (!context.fileUri().equals(fileUri)) {
return;
}
Range editRange = ReferencesUtil.getRange(location);
if (CommonUtil.isWithinLineRange(location.lineRange(), importDeclaration.lineRange()) &&
importDeclaration.prefix().isEmpty()) {
SeparatedNodeList<IdentifierToken> moduleNames = importDeclaration.moduleName();
LinePosition endPos = moduleNames.get(moduleNames.size() - 1).lineRange().endLine();
Range range = new Range(CommonUtil.toPosition(endPos), CommonUtil.toPosition(endPos));
List<TextEdit> textEdits = changes.computeIfAbsent(fileUri, k -> new ArrayList<>());
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(newName)) {
String escapedNewName = CommonUtil.escapeReservedKeyword(newName);
textEdits.add(new AnnotatedTextEdit(editRange,
"as " + escapedNewName, RenameChangeAnnotation.QUOTED_KEYWORD.getID()));
textEdits.add(new AnnotatedTextEdit(editRange,
"as " + newName, RenameChangeAnnotation.UNQUOTED_KEYWORD.getID()));
} else {
textEdits.add(new TextEdit(range, " as " + newName));
}
} else {
List<TextEdit> textEdits = changes.computeIfAbsent(fileUri, k -> new ArrayList<>());
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(newName)) {
String escapedNewName = CommonUtil.escapeReservedKeyword(newName);
textEdits.add(new AnnotatedTextEdit(editRange, escapedNewName,
RenameChangeAnnotation.QUOTED_KEYWORD.getID()));
textEdits.add(new AnnotatedTextEdit(editRange, newName,
RenameChangeAnnotation.UNQUOTED_KEYWORD.getID()));
} else {
textEdits.add(new TextEdit(ReferencesUtil.getRange(location), newName));
}
}
});
});
return changes;
}
private static Map<String, List<TextEdit>> handleImportPrefixRename(
RenameContext context,
Document document,
NonTerminalNode nodeAtCursor) {
String newName = context.getParams().getNewName();
Map<Module, List<Location>> locationMap = ReferencesUtil.getReferences(context);
Map<String, List<TextEdit>> changes = new HashMap<>();
locationMap.entrySet().stream()
.filter(moduleListEntry -> moduleListEntry.getKey().moduleId().equals(document.documentId().moduleId()))
.forEach(moduleLocations -> {
Module module = moduleLocations.getKey();
List<Location> locations = moduleLocations.getValue();
locations.forEach(location -> {
String fileUri = ReferencesUtil.getUriFromLocation(module, location);
if (!context.fileUri().equals(fileUri)) {
return;
}
List<TextEdit> textEdits = changes.computeIfAbsent(fileUri, k -> new ArrayList<>());
Range editRange = ReferencesUtil.getRange(location);
if (context.getHonorsChangeAnnotations() && CommonUtil.isKeyword(newName)) {
String escapedNewName = CommonUtil.escapeModuleName(newName);
textEdits.add(new AnnotatedTextEdit(editRange,
escapedNewName, RenameChangeAnnotation.QUOTED_KEYWORD.getID()));
textEdits.add(new AnnotatedTextEdit(editRange,
newName, RenameChangeAnnotation.UNQUOTED_KEYWORD.getID()));
} else {
textEdits.add(new TextEdit(editRange, newName));
}
});
});
return changes;
}
private static void fillTokenInfoAtCursor(ReferencesContext context) {
Optional<Document> document = context.currentDocument();
if (document.isEmpty()) {
throw new RuntimeException("Could not find a valid document");
}
TextDocument textDocument = document.get().textDocument();
Position position = context.getCursorPosition();
int txtPos = textDocument.textPositionFrom(LinePosition.from(position.getLine(), position.getCharacter()));
context.setCursorPositionInTree(txtPos);
}
private static boolean isSelfClassSymbol(ReferencesContext context) {
Optional<Document> srcFile = context.currentDocument();
Optional<SemanticModel> semanticModel = context.currentSemanticModel();
if (srcFile.isEmpty() || semanticModel.isEmpty() || context.currentSyntaxTree().isEmpty()) {
return false;
}
Position position = context.getCursorPosition();
LinePosition linePosition = LinePosition.from(position.getLine(), position.getCharacter());
Optional<Symbol> symbol = semanticModel.get().symbol(srcFile.get(), linePosition);
if (symbol.isEmpty()) {
return false;
}
Optional<ModuleMemberDeclarationNode> enclosingNode = BallerinaContextUtils.
getEnclosingModuleMember(context.currentSyntaxTree().get(), context.getCursorPositionInTree());
if (enclosingNode.isEmpty()) {
return false;
}
return CommonUtil.isSelfClassSymbol(symbol.get(), context, enclosingNode.get());
}
private enum RenameChangeAnnotation {
QUOTED_KEYWORD("Quoted Rename", "Rename to keyword with a quote", true, "quoted"),
UNQUOTED_KEYWORD("Un-quoted Rename", "Rename to keyword without a quote", true, "unquoted");
private String id;
private String label;
private String description;
private Boolean needsConfirmation;
RenameChangeAnnotation(String label, String description, Boolean needsConfirmation, String id) {
this.id = id;
this.label = label;
this.description = description;
this.needsConfirmation = needsConfirmation;
}
public String getID() {
return this.id;
}
public ChangeAnnotation getChangeAnnotation() {
ChangeAnnotation changeAnnotation = new ChangeAnnotation(this.label);
changeAnnotation.setDescription(this.description);
changeAnnotation.setNeedsConfirmation(this.needsConfirmation);
return changeAnnotation;
}
}
}
|
It might cause a job progress persistence issue: if finished splits are skipped entirely, their progress is dropped from what gets persisted and is lost.
|
private void initInventoryTasks(final CDCJobItemContext jobItemContext, final AtomicBoolean importerUsed, final List<CDCChannelProgressPair> channelProgressPairs) {
long startTimeMillis = System.currentTimeMillis();
CDCTaskConfiguration taskConfig = jobItemContext.getTaskConfig();
ImporterConfiguration importerConfig = taskConfig.getImporterConfig();
TransmissionProcessContext processContext = jobItemContext.getJobProcessContext();
for (InventoryDumperContext each : new InventoryTaskSplitter(jobItemContext.getSourceDataSource(), new InventoryDumperContext(taskConfig.getDumperContext().getCommonContext()), importerConfig)
.splitInventoryDumperContext(jobItemContext)) {
if (each.getCommonContext().getPosition() instanceof FinishedPosition) {
continue;
}
AtomicReference<IngestPosition> position = new AtomicReference<>(each.getCommonContext().getPosition());
PipelineChannel channel = PipelineTaskUtils.createInventoryChannel(processContext.getPipelineChannelCreator(), importerConfig.getBatchSize(), position);
channelProgressPairs.add(new CDCChannelProgressPair(channel, jobItemContext));
Dumper dumper = new InventoryDumper(each, channel, jobItemContext.getSourceDataSource(), jobItemContext.getSourceMetaDataLoader());
Importer importer = importerUsed.get() ? null
: new CDCImporter(channelProgressPairs, importerConfig.getBatchSize(), 3, TimeUnit.SECONDS, jobItemContext.getSink(),
needSorting(ImporterType.INVENTORY, hasGlobalCSN(taskConfig.getDumperContext().getCommonContext().getDataSourceConfig().getDatabaseType())),
importerConfig.getRateLimitAlgorithm());
jobItemContext.getInventoryTasks().add(new CDCInventoryTask(PipelineTaskUtils.generateInventoryTaskId(each), processContext.getInventoryDumperExecuteEngine(),
processContext.getInventoryImporterExecuteEngine(), dumper, importer, position));
importerUsed.set(true);
}
log.info("initInventoryTasks cost {} ms", System.currentTimeMillis() - startTimeMillis);
}
|
}
|
private void initInventoryTasks(final CDCJobItemContext jobItemContext, final AtomicBoolean importerUsed, final List<CDCChannelProgressPair> channelProgressPairs) {
long startTimeMillis = System.currentTimeMillis();
CDCTaskConfiguration taskConfig = jobItemContext.getTaskConfig();
ImporterConfiguration importerConfig = taskConfig.getImporterConfig();
TransmissionProcessContext processContext = jobItemContext.getJobProcessContext();
for (InventoryDumperContext each : new InventoryTaskSplitter(jobItemContext.getSourceDataSource(), new InventoryDumperContext(taskConfig.getDumperContext().getCommonContext()), importerConfig)
.splitInventoryDumperContext(jobItemContext)) {
AtomicReference<IngestPosition> position = new AtomicReference<>(each.getCommonContext().getPosition());
PipelineChannel channel = PipelineTaskUtils.createInventoryChannel(processContext.getPipelineChannelCreator(), importerConfig.getBatchSize(), position);
if (!(position.get() instanceof FinishedPosition)) {
channelProgressPairs.add(new CDCChannelProgressPair(channel, jobItemContext));
}
Dumper dumper = new InventoryDumper(each, channel, jobItemContext.getSourceDataSource(), jobItemContext.getSourceMetaDataLoader());
Importer importer = importerUsed.get() ? null
: new CDCImporter(channelProgressPairs, importerConfig.getBatchSize(), 3, TimeUnit.SECONDS, jobItemContext.getSink(),
needSorting(ImporterType.INVENTORY, hasGlobalCSN(taskConfig.getDumperContext().getCommonContext().getDataSourceConfig().getDatabaseType())),
importerConfig.getRateLimitAlgorithm());
jobItemContext.getInventoryTasks().add(new CDCInventoryTask(PipelineTaskUtils.generateInventoryTaskId(each), processContext.getInventoryDumperExecuteEngine(),
processContext.getInventoryImporterExecuteEngine(), dumper, importer, position));
if (!(position.get() instanceof FinishedPosition)) {
importerUsed.set(true);
}
}
log.info("initInventoryTasks cost {} ms", System.currentTimeMillis() - startTimeMillis);
}
|
class CDCJobPreparer {
private final PipelineJobItemManager<TransmissionJobItemProgress> jobItemManager = new PipelineJobItemManager<>(new CDCJobType().getYamlJobItemProgressSwapper());
/**
* Do prepare work.
*
* @param jobItemContexts job item contexts
*/
public void initTasks(final Collection<CDCJobItemContext> jobItemContexts) {
AtomicBoolean inventoryImporterUsed = new AtomicBoolean();
List<CDCChannelProgressPair> inventoryChannelProgressPairs = new CopyOnWriteArrayList<>();
AtomicBoolean incrementalImporterUsed = new AtomicBoolean();
List<CDCChannelProgressPair> incrementalChannelProgressPairs = new LinkedList<>();
for (CDCJobItemContext each : jobItemContexts) {
initTasks0(each, inventoryImporterUsed, inventoryChannelProgressPairs, incrementalImporterUsed, incrementalChannelProgressPairs);
}
}
private void initTasks0(final CDCJobItemContext jobItemContext, final AtomicBoolean inventoryImporterUsed, final List<CDCChannelProgressPair> inventoryChannelProgressPairs,
final AtomicBoolean incrementalImporterUsed, final List<CDCChannelProgressPair> incrementalChannelProgressPairs) {
Optional<TransmissionJobItemProgress> jobItemProgress = jobItemManager.getProgress(jobItemContext.getJobId(), jobItemContext.getShardingItem());
if (!jobItemProgress.isPresent()) {
jobItemManager.persistProgress(jobItemContext);
}
if (jobItemContext.isStopping()) {
PipelineJobRegistry.stop(jobItemContext.getJobId());
return;
}
initIncrementalPosition(jobItemContext);
if (jobItemContext.getJobConfig().isFull()) {
initInventoryTasks(jobItemContext, inventoryImporterUsed, inventoryChannelProgressPairs);
}
initIncrementalTask(jobItemContext, incrementalImporterUsed, incrementalChannelProgressPairs);
}
private void initIncrementalPosition(final CDCJobItemContext jobItemContext) {
CDCTaskConfiguration taskConfig = jobItemContext.getTaskConfig();
JobItemIncrementalTasksProgress initIncremental = null == jobItemContext.getInitProgress() ? null : jobItemContext.getInitProgress().getIncremental();
try {
taskConfig.getDumperContext().getCommonContext().setPosition(
PipelineJobPreparerUtils.getIncrementalPosition(initIncremental, taskConfig.getDumperContext(), jobItemContext.getDataSourceManager()));
} catch (final SQLException ex) {
throw new PrepareJobWithGetBinlogPositionException(jobItemContext.getJobId(), ex);
}
}
private boolean needSorting(final ImporterType importerType, final boolean hasGlobalCSN) {
return ImporterType.INCREMENTAL == importerType && hasGlobalCSN;
}
private boolean hasGlobalCSN(final DatabaseType databaseType) {
return databaseType instanceof OpenGaussDatabaseType;
}
private void initIncrementalTask(final CDCJobItemContext jobItemContext, final AtomicBoolean importerUsed, final List<CDCChannelProgressPair> channelProgressPairs) {
CDCTaskConfiguration taskConfig = jobItemContext.getTaskConfig();
IncrementalDumperContext dumperContext = taskConfig.getDumperContext();
ImporterConfiguration importerConfig = taskConfig.getImporterConfig();
IncrementalTaskProgress taskProgress = PipelineTaskUtils.createIncrementalTaskProgress(dumperContext.getCommonContext().getPosition(), jobItemContext.getInitProgress());
PipelineChannel channel = PipelineTaskUtils.createIncrementalChannel(importerConfig.getConcurrency(), jobItemContext.getJobProcessContext().getPipelineChannelCreator(), taskProgress);
channelProgressPairs.add(new CDCChannelProgressPair(channel, jobItemContext));
Dumper dumper = DatabaseTypedSPILoader.getService(IncrementalDumperCreator.class, dumperContext.getCommonContext().getDataSourceConfig().getDatabaseType())
.createIncrementalDumper(dumperContext, dumperContext.getCommonContext().getPosition(), channel, jobItemContext.getSourceMetaDataLoader());
boolean needSorting = needSorting(ImporterType.INCREMENTAL, hasGlobalCSN(importerConfig.getDataSourceConfig().getDatabaseType()));
Importer importer = importerUsed.get() ? null
: new CDCImporter(channelProgressPairs, importerConfig.getBatchSize(), 300, TimeUnit.MILLISECONDS,
jobItemContext.getSink(), needSorting, importerConfig.getRateLimitAlgorithm());
PipelineTask incrementalTask = new CDCIncrementalTask(
dumperContext.getCommonContext().getDataSourceName(), jobItemContext.getJobProcessContext().getIncrementalExecuteEngine(), dumper, importer, taskProgress);
jobItemContext.getIncrementalTasks().add(incrementalTask);
importerUsed.set(true);
}
}
|
class CDCJobPreparer {
private final PipelineJobItemManager<TransmissionJobItemProgress> jobItemManager = new PipelineJobItemManager<>(new CDCJobType().getYamlJobItemProgressSwapper());
/**
* Do prepare work.
*
* @param jobItemContexts job item contexts
*/
public void initTasks(final Collection<CDCJobItemContext> jobItemContexts) {
AtomicBoolean inventoryImporterUsed = new AtomicBoolean();
List<CDCChannelProgressPair> inventoryChannelProgressPairs = new CopyOnWriteArrayList<>();
AtomicBoolean incrementalImporterUsed = new AtomicBoolean();
List<CDCChannelProgressPair> incrementalChannelProgressPairs = new LinkedList<>();
for (CDCJobItemContext each : jobItemContexts) {
initTasks0(each, inventoryImporterUsed, inventoryChannelProgressPairs, incrementalImporterUsed, incrementalChannelProgressPairs);
}
}
private void initTasks0(final CDCJobItemContext jobItemContext, final AtomicBoolean inventoryImporterUsed, final List<CDCChannelProgressPair> inventoryChannelProgressPairs,
final AtomicBoolean incrementalImporterUsed, final List<CDCChannelProgressPair> incrementalChannelProgressPairs) {
Optional<TransmissionJobItemProgress> jobItemProgress = jobItemManager.getProgress(jobItemContext.getJobId(), jobItemContext.getShardingItem());
if (!jobItemProgress.isPresent()) {
jobItemManager.persistProgress(jobItemContext);
}
if (jobItemContext.isStopping()) {
PipelineJobRegistry.stop(jobItemContext.getJobId());
return;
}
initIncrementalPosition(jobItemContext);
if (jobItemContext.getJobConfig().isFull()) {
initInventoryTasks(jobItemContext, inventoryImporterUsed, inventoryChannelProgressPairs);
}
initIncrementalTask(jobItemContext, incrementalImporterUsed, incrementalChannelProgressPairs);
}
private void initIncrementalPosition(final CDCJobItemContext jobItemContext) {
CDCTaskConfiguration taskConfig = jobItemContext.getTaskConfig();
JobItemIncrementalTasksProgress initIncremental = null == jobItemContext.getInitProgress() ? null : jobItemContext.getInitProgress().getIncremental();
try {
taskConfig.getDumperContext().getCommonContext().setPosition(
PipelineJobPreparerUtils.getIncrementalPosition(initIncremental, taskConfig.getDumperContext(), jobItemContext.getDataSourceManager()));
} catch (final SQLException ex) {
throw new PrepareJobWithGetBinlogPositionException(jobItemContext.getJobId(), ex);
}
}
private boolean needSorting(final ImporterType importerType, final boolean hasGlobalCSN) {
return ImporterType.INCREMENTAL == importerType && hasGlobalCSN;
}
private boolean hasGlobalCSN(final DatabaseType databaseType) {
return databaseType instanceof OpenGaussDatabaseType;
}
private void initIncrementalTask(final CDCJobItemContext jobItemContext, final AtomicBoolean importerUsed, final List<CDCChannelProgressPair> channelProgressPairs) {
CDCTaskConfiguration taskConfig = jobItemContext.getTaskConfig();
IncrementalDumperContext dumperContext = taskConfig.getDumperContext();
ImporterConfiguration importerConfig = taskConfig.getImporterConfig();
IncrementalTaskProgress taskProgress = PipelineTaskUtils.createIncrementalTaskProgress(dumperContext.getCommonContext().getPosition(), jobItemContext.getInitProgress());
PipelineChannel channel = PipelineTaskUtils.createIncrementalChannel(importerConfig.getConcurrency(), jobItemContext.getJobProcessContext().getPipelineChannelCreator(), taskProgress);
channelProgressPairs.add(new CDCChannelProgressPair(channel, jobItemContext));
Dumper dumper = DatabaseTypedSPILoader.getService(IncrementalDumperCreator.class, dumperContext.getCommonContext().getDataSourceConfig().getDatabaseType())
.createIncrementalDumper(dumperContext, dumperContext.getCommonContext().getPosition(), channel, jobItemContext.getSourceMetaDataLoader());
boolean needSorting = needSorting(ImporterType.INCREMENTAL, hasGlobalCSN(importerConfig.getDataSourceConfig().getDatabaseType()));
Importer importer = importerUsed.get() ? null
: new CDCImporter(channelProgressPairs, importerConfig.getBatchSize(), 300, TimeUnit.MILLISECONDS,
jobItemContext.getSink(), needSorting, importerConfig.getRateLimitAlgorithm());
PipelineTask incrementalTask = new CDCIncrementalTask(
dumperContext.getCommonContext().getDataSourceName(), jobItemContext.getJobProcessContext().getIncrementalExecuteEngine(), dumper, importer, taskProgress);
jobItemContext.getIncrementalTasks().add(incrementalTask);
importerUsed.set(true);
}
}
|
s3 schemes: s3 and s3a; cos scheme: cosn. The fs.<scheme>.impl.disable.cache key should therefore be derived from the location's actual URI scheme rather than hard-coded; a minimal sketch follows.
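A minimal sketch of deriving the per-scheme disable-cache key from the location URI, as the target code does. The SchemeCacheKeyDemo class and its disableCacheKey helper are hypothetical, and it assumes only hadoop-common on the classpath:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;

public class SchemeCacheKeyDemo {
    // Hypothetical helper: build "fs.<scheme>.impl.disable.cache" from a location URI.
    static String disableCacheKey(String location) {
        String scheme = URI.create(location).getScheme(); // e.g. "s3", "s3a", or "cosn"
        return "fs." + scheme + ".impl.disable.cache";
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // s3 locations may arrive with either the s3 or s3a scheme; cos locations use cosn,
        // so hard-coding a single key would miss the other schemes.
        for (String loc : new String[] {"s3://bucket/a", "s3a://bucket/a", "cosn://bucket/a"}) {
            conf.set(disableCacheKey(loc), "true");
        }
        System.out.println(conf.get("fs.s3a.impl.disable.cache")); // prints: true
    }
}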
|
private FileCacheValue loadFiles(FileCacheKey key) {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(ClassLoader.getSystemClassLoader());
String finalLocation = S3Util.convertToS3IfNecessary(key.location);
try {
Path path = new Path(finalLocation);
URI uri = path.toUri();
if (uri.getScheme() != null) {
updateJobConf("fs." + uri.getScheme() + ".impl.disable.cache", "true");
}
} catch (Exception e) {
LOG.warn("unknown scheme in path: " + finalLocation, e);
}
FileInputFormat.setInputPaths(jobConf, finalLocation);
try {
FileCacheValue result;
InputFormat<?, ?> inputFormat = HiveUtil.getInputFormat(jobConf, key.inputFormat, false);
if (key.useSelfSplitter) {
result = getFileCache(finalLocation, inputFormat, jobConf, key.getPartitionValues());
} else {
InputSplit[] splits;
String remoteUser = jobConf.get(HdfsResource.HADOOP_USER_NAME);
if (!Strings.isNullOrEmpty(remoteUser)) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(remoteUser);
splits = ugi.doAs(
(PrivilegedExceptionAction<InputSplit[]>) () -> inputFormat.getSplits(jobConf, 0));
} else {
splits = inputFormat.getSplits(jobConf, 0 /* use hdfs block size as default */);
}
result = new FileCacheValue();
for (int i = 0; i < splits.length; i++) {
org.apache.hadoop.mapred.FileSplit fs = ((org.apache.hadoop.mapred.FileSplit) splits[i]);
result.addSplit(new FileSplit(fs.getPath(), fs.getStart(), fs.getLength(), -1, null, null));
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("load
}
return result;
} catch (Exception e) {
throw new CacheException("failed to get input splits for %s in catalog %s", e, key, catalog.getName());
}
} finally {
Thread.currentThread().setContextClassLoader(classLoader);
}
}
|
updateJobConf("fs." + uri.getScheme() + ".impl.disable.cache", "true");
|
private FileCacheValue loadFiles(FileCacheKey key) {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(ClassLoader.getSystemClassLoader());
String finalLocation = S3Util.convertToS3IfNecessary(key.location);
try {
Path path = new Path(finalLocation);
URI uri = path.toUri();
if (uri.getScheme() != null) {
updateJobConf("fs." + uri.getScheme() + ".impl.disable.cache", "true");
}
} catch (Exception e) {
LOG.warn("unknown scheme in path: " + finalLocation, e);
}
FileInputFormat.setInputPaths(jobConf, finalLocation);
try {
FileCacheValue result;
InputFormat<?, ?> inputFormat = HiveUtil.getInputFormat(jobConf, key.inputFormat, false);
if (key.useSelfSplitter) {
result = getFileCache(finalLocation, inputFormat, jobConf, key.getPartitionValues());
} else {
InputSplit[] splits;
String remoteUser = jobConf.get(HdfsResource.HADOOP_USER_NAME);
if (!Strings.isNullOrEmpty(remoteUser)) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(remoteUser);
splits = ugi.doAs(
(PrivilegedExceptionAction<InputSplit[]>) () -> inputFormat.getSplits(jobConf, 0));
} else {
splits = inputFormat.getSplits(jobConf, 0 /* use hdfs block size as default */);
}
result = new FileCacheValue();
for (int i = 0; i < splits.length; i++) {
org.apache.hadoop.mapred.FileSplit fs = ((org.apache.hadoop.mapred.FileSplit) splits[i]);
result.addSplit(new FileSplit(fs.getPath(), fs.getStart(), fs.getLength(), -1, null, null));
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("load
}
return result;
} catch (Exception e) {
throw new CacheException("failed to get input splits for %s in catalog %s", e, key, catalog.getName());
}
} finally {
Thread.currentThread().setContextClassLoader(classLoader);
}
}
|
class HiveMetaStoreCache {
private static final Logger LOG = LogManager.getLogger(HiveMetaStoreCache.class);
private static final int MIN_BATCH_FETCH_PARTITION_NUM = 50;
public static final String HIVE_DEFAULT_PARTITION = "__HIVE_DEFAULT_PARTITION__";
public static final String HIVE_ORC_ACID_VERSION_FILE = "_orc_acid_version";
private static final String HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX = "bucket_";
private HMSExternalCatalog catalog;
private JobConf jobConf;
private Executor executor;
private LoadingCache<PartitionValueCacheKey, HivePartitionValues> partitionValuesCache;
private LoadingCache<PartitionCacheKey, HivePartition> partitionCache;
private volatile AtomicReference<LoadingCache<FileCacheKey, FileCacheValue>> fileCacheRef
= new AtomicReference<>();
public HiveMetaStoreCache(HMSExternalCatalog catalog, Executor executor) {
this.catalog = catalog;
this.executor = executor;
init();
initMetrics();
}
private void init() {
partitionValuesCache = CacheBuilder.newBuilder().maximumSize(Config.max_hive_table_catch_num)
.expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES)
.build(CacheLoader.asyncReloading(
new CacheLoader<PartitionValueCacheKey, HivePartitionValues>() {
@Override
public HivePartitionValues load(PartitionValueCacheKey key) throws Exception {
return loadPartitionValues(key);
}
}, executor));
partitionCache = CacheBuilder.newBuilder().maximumSize(Config.max_hive_partition_cache_num)
.expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES)
.build(CacheLoader.asyncReloading(new CacheLoader<PartitionCacheKey, HivePartition>() {
@Override
public HivePartition load(PartitionCacheKey key) throws Exception {
return loadPartitions(key);
}
}, executor));
setNewFileCache();
}
/***
 * generate a new file cache and set it to fileCacheRef
*/
public void setNewFileCache() {
setJobConf();
int fileMetaCacheTtlSecond = NumberUtils.toInt(
(catalog.getProperties().get(HMSExternalCatalog.FILE_META_CACHE_TTL_SECOND)),
HMSExternalCatalog.FILE_META_CACHE_NO_TTL);
CacheBuilder<Object, Object> fileCacheBuilder = CacheBuilder.newBuilder()
.maximumSize(Config.max_external_file_cache_num)
.expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES);
if (fileMetaCacheTtlSecond >= HMSExternalCatalog.FILE_META_CACHE_TTL_DISABLE_CACHE) {
fileCacheBuilder.expireAfterWrite(fileMetaCacheTtlSecond, TimeUnit.SECONDS);
}
CacheLoader<FileCacheKey, FileCacheValue> loader = getGuavaCacheLoader(executor,
fileMetaCacheTtlSecond);
LoadingCache<FileCacheKey, FileCacheValue> preFileCache = fileCacheRef.get();
fileCacheRef.set(fileCacheBuilder.build(loader));
if (Objects.nonNull(preFileCache)) {
preFileCache.invalidateAll();
}
}
private void initMetrics() {
GaugeMetric<Long> valueCacheGauge = new GaugeMetric<Long>("hive_meta_cache",
Metric.MetricUnit.NOUNIT, "hive partition value cache number") {
@Override
public Long getValue() {
return partitionValuesCache.size();
}
};
valueCacheGauge.addLabel(new MetricLabel("type", "partition_value"));
valueCacheGauge.addLabel(new MetricLabel("catalog", catalog.getName()));
MetricRepo.DORIS_METRIC_REGISTER.addMetrics(valueCacheGauge);
GaugeMetric<Long> partitionCacheGauge = new GaugeMetric<Long>("hive_meta_cache",
Metric.MetricUnit.NOUNIT, "hive partition cache number") {
@Override
public Long getValue() {
return partitionCache.size();
}
};
partitionCacheGauge.addLabel(new MetricLabel("type", "partition"));
partitionCacheGauge.addLabel(new MetricLabel("catalog", catalog.getName()));
MetricRepo.DORIS_METRIC_REGISTER.addMetrics(partitionCacheGauge);
GaugeMetric<Long> fileCacheGauge = new GaugeMetric<Long>("hive_meta_cache",
Metric.MetricUnit.NOUNIT, "hive file cache number") {
@Override
public Long getValue() {
return fileCacheRef.get().size();
}
};
fileCacheGauge.addLabel(new MetricLabel("type", "file"));
fileCacheGauge.addLabel(new MetricLabel("catalog", catalog.getName()));
MetricRepo.DORIS_METRIC_REGISTER.addMetrics(fileCacheGauge);
}
private HivePartitionValues loadPartitionValues(PartitionValueCacheKey key) {
List<String> partitionNames = catalog.getClient().listPartitionNames(key.dbName, key.tblName);
if (LOG.isDebugEnabled()) {
LOG.debug("load
}
Map<Long, PartitionItem> idToPartitionItem = Maps.newHashMapWithExpectedSize(partitionNames.size());
Map<String, Long> partitionNameToIdMap = Maps.newHashMapWithExpectedSize(partitionNames.size());
Map<Long, List<UniqueId>> idToUniqueIdsMap = Maps.newHashMapWithExpectedSize(partitionNames.size());
long idx = 0;
for (String partitionName : partitionNames) {
long partitionId = idx++;
ListPartitionItem listPartitionItem = toListPartitionItem(partitionName, key.types);
idToPartitionItem.put(partitionId, listPartitionItem);
partitionNameToIdMap.put(partitionName, partitionId);
}
Map<UniqueId, Range<PartitionKey>> uidToPartitionRange = null;
Map<Range<PartitionKey>, UniqueId> rangeToId = null;
RangeMap<ColumnBound, UniqueId> singleColumnRangeMap = null;
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap = null;
if (key.types.size() > 1) {
uidToPartitionRange = ListPartitionPrunerV2.genUidToPartitionRange(idToPartitionItem, idToUniqueIdsMap);
rangeToId = ListPartitionPrunerV2.genRangeToId(uidToPartitionRange);
} else {
Preconditions.checkState(key.types.size() == 1, key.types);
singleColumnRangeMap = ListPartitionPrunerV2.genSingleColumnRangeMap(idToPartitionItem, idToUniqueIdsMap);
singleUidToColumnRangeMap = ListPartitionPrunerV2.genSingleUidToColumnRange(singleColumnRangeMap);
}
Map<Long, List<String>> partitionValuesMap = ListPartitionPrunerV2.getPartitionValuesMap(idToPartitionItem);
return new HivePartitionValues(idToPartitionItem, uidToPartitionRange, rangeToId, singleColumnRangeMap, idx,
partitionNameToIdMap, idToUniqueIdsMap, singleUidToColumnRangeMap, partitionValuesMap);
}
public ListPartitionItem toListPartitionItem(String partitionName, List<Type> types) {
String[] parts = partitionName.split("/");
Preconditions.checkState(parts.length == types.size(), partitionName + " vs. " + types);
List<PartitionValue> values = Lists.newArrayListWithExpectedSize(types.size());
for (String part : parts) {
String[] kv = part.split("=");
Preconditions.checkState(kv.length == 2, partitionName);
String partitionValue;
try {
partitionValue = URLDecoder.decode(kv[1], StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
values.add(new PartitionValue(partitionValue, HIVE_DEFAULT_PARTITION.equals(partitionValue)));
}
try {
PartitionKey key = PartitionKey.createListPartitionKeyWithTypes(values, types);
return new ListPartitionItem(Lists.newArrayList(key));
} catch (AnalysisException e) {
throw new CacheException("failed to convert hive partition %s to list partition in catalog %s",
e, partitionName, catalog.getName());
}
}
private HivePartition loadPartitions(PartitionCacheKey key) {
Partition partition = catalog.getClient().getPartition(key.dbName, key.tblName, key.values);
StorageDescriptor sd = partition.getSd();
if (LOG.isDebugEnabled()) {
LOG.debug("load partition format: {}, location: {} for {} in catalog {}",
sd.getInputFormat(), sd.getLocation(), key, catalog.getName());
}
return new HivePartition(key.dbName, key.tblName, false, sd.getInputFormat(), sd.getLocation(), key.values);
}
private FileCacheValue getFileCache(String location, InputFormat<?, ?> inputFormat,
JobConf jobConf,
List<String> partitionValues) throws UserException {
FileCacheValue result = new FileCacheValue();
result.setSplittable(HiveUtil.isSplittable(inputFormat, new Path(location), jobConf));
RemoteFileSystem fs = FileSystemFactory.getByLocation(location, jobConf);
try {
RemoteFiles locatedFiles = fs.listLocatedFiles(location, true, true);
locatedFiles.files().forEach(result::addFile);
} catch (Exception e) {
if (e.getCause() instanceof FileNotFoundException) {
LOG.warn(String.format("File %s not exist.", location));
} else {
throw e;
}
}
result.setPartitionValues(partitionValues);
return result;
}
private synchronized void setJobConf() {
Configuration configuration = new HdfsConfiguration();
for (Map.Entry<String, String> entry : catalog.getCatalogProperty().getHadoopProperties().entrySet()) {
configuration.set(entry.getKey(), entry.getValue());
}
jobConf = new JobConf(configuration);
jobConf.set("mapreduce.input.fileinputformat.input.dir.recursive", "true");
jobConf.set("fs.hdfs.impl.disable.cache", "true");
jobConf.set("fs.file.impl.disable.cache", "true");
}
private synchronized void updateJobConf(String key, String value) {
jobConf.set(key, value);
}
public HivePartitionValues getPartitionValues(String dbName, String tblName, List<Type> types) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, types);
return getPartitionValues(key);
}
public HivePartitionValues getPartitionValues(PartitionValueCacheKey key) {
try {
return partitionValuesCache.get(key);
} catch (ExecutionException e) {
throw new CacheException("failed to get partition values for %s in catalog %s", e, key, catalog.getName());
}
}
public List<FileCacheValue> getFilesByPartitions(List<HivePartition> partitions, boolean useSelfSplitter) {
long start = System.currentTimeMillis();
List<FileCacheKey> keys = Lists.newArrayListWithExpectedSize(partitions.size());
partitions.stream().forEach(p -> {
FileCacheKey fileCacheKey = p.isDummyPartition()
? FileCacheKey.createDummyCacheKey(p.getDbName(), p.getTblName(), p.getPath(),
p.getInputFormat(), useSelfSplitter)
: new FileCacheKey(p.getPath(), p.getInputFormat(), p.getPartitionValues());
fileCacheKey.setUseSelfSplitter(useSelfSplitter);
keys.add(fileCacheKey);
});
Stream<FileCacheKey> stream;
if (partitions.size() < MIN_BATCH_FETCH_PARTITION_NUM) {
stream = keys.stream();
} else {
stream = keys.parallelStream();
}
List<FileCacheValue> fileLists = stream.map(k -> {
try {
FileCacheValue fileCacheValue = fileCacheRef.get().get(k);
for (int i = 0; i < fileCacheValue.getValuesSize(); i++) {
if (HIVE_DEFAULT_PARTITION.equals(fileCacheValue.getPartitionValues().get(i))) {
fileCacheValue.getPartitionValues().set(i, FeConstants.null_string);
}
}
return fileCacheValue;
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
}).collect(Collectors.toList());
LOG.debug("get
fileLists.stream().mapToInt(l -> l.getFiles() == null
? (l.getSplits() == null ? 0 : l.getSplits().size()) : l.getFiles().size()).sum(),
partitions.size(), catalog.getName(), (System.currentTimeMillis() - start));
return fileLists;
}
public List<HivePartition> getAllPartitions(String dbName, String name, List<List<String>> partitionValuesList) {
long start = System.currentTimeMillis();
List<PartitionCacheKey> keys = Lists.newArrayListWithExpectedSize(partitionValuesList.size());
partitionValuesList.stream().forEach(p -> keys.add(new PartitionCacheKey(dbName, name, p)));
Stream<PartitionCacheKey> stream;
if (partitionValuesList.size() < MIN_BATCH_FETCH_PARTITION_NUM) {
stream = keys.stream();
} else {
stream = keys.parallelStream();
}
List<HivePartition> partitions = stream.map(k -> {
try {
return partitionCache.get(k);
} catch (ExecutionException e) {
throw new CacheException("failed to get partition for %s in catalog %s", e, k, catalog.getName());
}
}).collect(Collectors.toList());
LOG.debug("get
(System.currentTimeMillis() - start));
return partitions;
}
public void invalidateTableCache(String dbName, String tblName) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, null);
HivePartitionValues partitionValues = partitionValuesCache.getIfPresent(key);
if (partitionValues != null) {
long start = System.currentTimeMillis();
for (List<String> values : partitionValues.partitionValuesMap.values()) {
PartitionCacheKey partKey = new PartitionCacheKey(dbName, tblName, values);
HivePartition partition = partitionCache.getIfPresent(partKey);
if (partition != null) {
fileCacheRef.get().invalidate(new FileCacheKey(partition.getPath(),
null, partition.getPartitionValues()));
partitionCache.invalidate(partKey);
}
}
partitionValuesCache.invalidate(key);
LOG.debug("invalid table cache for {}.{} in catalog {}, cache num: {}, cost: {} ms",
dbName, tblName, catalog.getName(), partitionValues.partitionValuesMap.size(),
(System.currentTimeMillis() - start));
} else {
/**
 * A file cache entry can be created with reference to
 * {@link org.apache.doris.planner.external.HiveSplitter},
 * so we need to invalidate it if this is a non-partitioned table.
 * We use a dummy {@link org.apache.doris.datasource.hive.HiveMetaStoreCache.FileCacheKey}
 * to avoid invoking the HMS client, because this method may be invoked when a slave FE replays journal logs,
 * and the FE will exit if network problems occur.
 */
FileCacheKey fileCacheKey = FileCacheKey.createDummyCacheKey(
dbName, tblName, null, null, false);
fileCacheRef.get().invalidate(fileCacheKey);
}
}
public void invalidatePartitionCache(String dbName, String tblName, String partitionName) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, null);
HivePartitionValues partitionValues = partitionValuesCache.getIfPresent(key);
if (partitionValues != null) {
Long partitionId = partitionValues.partitionNameToIdMap.get(partitionName);
List<String> values = partitionValues.partitionValuesMap.get(partitionId);
PartitionCacheKey partKey = new PartitionCacheKey(dbName, tblName, values);
HivePartition partition = partitionCache.getIfPresent(partKey);
if (partition != null) {
fileCacheRef.get().invalidate(new FileCacheKey(partition.getPath(),
null, partition.getPartitionValues()));
partitionCache.invalidate(partKey);
}
}
}
public void invalidateDbCache(String dbName) {
long start = System.currentTimeMillis();
Set<PartitionValueCacheKey> keys = partitionValuesCache.asMap().keySet();
for (PartitionValueCacheKey key : keys) {
if (key.dbName.equals(dbName)) {
invalidateTableCache(dbName, key.tblName);
}
}
LOG.debug("invalid db cache for {} in catalog {}, cache num: {}, cost: {} ms", dbName, catalog.getName(),
keys.size(), (System.currentTimeMillis() - start));
}
public void invalidateAll() {
partitionValuesCache.invalidateAll();
partitionCache.invalidateAll();
fileCacheRef.get().invalidateAll();
LOG.debug("invalid all meta cache in catalog {}", catalog.getName());
}
public void addPartitionsCache(String dbName, String tblName, List<String> partitionNames,
List<Type> partitionColumnTypes) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, partitionColumnTypes);
HivePartitionValues partitionValues = partitionValuesCache.getIfPresent(key);
if (partitionValues == null) {
return;
}
HivePartitionValues copy = partitionValues.copy();
Map<Long, PartitionItem> idToPartitionItemBefore = copy.getIdToPartitionItem();
Map<String, Long> partitionNameToIdMapBefore = copy.getPartitionNameToIdMap();
Map<Long, List<UniqueId>> idToUniqueIdsMap = copy.getIdToUniqueIdsMap();
Map<Long, PartitionItem> idToPartitionItem = new HashMap<>();
long idx = copy.getNextPartitionId();
for (String partitionName : partitionNames) {
if (partitionNameToIdMapBefore.containsKey(partitionName)) {
LOG.info("addPartitionsCache partitionName:[{}] has exist in table:[{}]", partitionName, tblName);
continue;
}
long partitionId = idx++;
ListPartitionItem listPartitionItem = toListPartitionItem(partitionName, key.types);
idToPartitionItemBefore.put(partitionId, listPartitionItem);
idToPartitionItem.put(partitionId, listPartitionItem);
partitionNameToIdMapBefore.put(partitionName, partitionId);
}
Map<Long, List<String>> partitionValuesMapBefore = copy.getPartitionValuesMap();
Map<Long, List<String>> partitionValuesMap = ListPartitionPrunerV2.getPartitionValuesMap(idToPartitionItem);
partitionValuesMapBefore.putAll(partitionValuesMap);
copy.setNextPartitionId(idx);
if (key.types.size() > 1) {
Map<UniqueId, Range<PartitionKey>> uidToPartitionRangeBefore = copy.getUidToPartitionRange();
Map<UniqueId, Range<PartitionKey>> uidToPartitionRange = ListPartitionPrunerV2
.genUidToPartitionRange(idToPartitionItem, idToUniqueIdsMap);
uidToPartitionRangeBefore.putAll(uidToPartitionRange);
Map<Range<PartitionKey>, UniqueId> rangeToIdBefore = copy.getRangeToId();
Map<Range<PartitionKey>, UniqueId> rangeToId = ListPartitionPrunerV2.genRangeToId(uidToPartitionRange);
rangeToIdBefore.putAll(rangeToId);
} else {
Preconditions.checkState(key.types.size() == 1, key.types);
RangeMap<ColumnBound, UniqueId> singleColumnRangeMapBefore = copy.getSingleColumnRangeMap();
RangeMap<ColumnBound, UniqueId> singleColumnRangeMap = ListPartitionPrunerV2
.genSingleColumnRangeMap(idToPartitionItem, idToUniqueIdsMap);
singleColumnRangeMapBefore.putAll(singleColumnRangeMap);
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMapBefore = copy
.getSingleUidToColumnRangeMap();
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap = ListPartitionPrunerV2
.genSingleUidToColumnRange(singleColumnRangeMap);
singleUidToColumnRangeMapBefore.putAll(singleUidToColumnRangeMap);
}
HivePartitionValues partitionValuesCur = partitionValuesCache.getIfPresent(key);
if (partitionValuesCur == partitionValues) {
partitionValuesCache.put(key, copy);
}
}
public void dropPartitionsCache(String dbName, String tblName, List<String> partitionNames,
boolean invalidPartitionCache) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, null);
HivePartitionValues partitionValues = partitionValuesCache.getIfPresent(key);
if (partitionValues == null) {
return;
}
HivePartitionValues copy = partitionValues.copy();
Map<String, Long> partitionNameToIdMapBefore = copy.getPartitionNameToIdMap();
Map<Long, PartitionItem> idToPartitionItemBefore = copy.getIdToPartitionItem();
Map<Long, List<UniqueId>> idToUniqueIdsMapBefore = copy.getIdToUniqueIdsMap();
Map<UniqueId, Range<PartitionKey>> uidToPartitionRangeBefore = copy.getUidToPartitionRange();
Map<Range<PartitionKey>, UniqueId> rangeToIdBefore = copy.getRangeToId();
RangeMap<ColumnBound, UniqueId> singleColumnRangeMapBefore = copy.getSingleColumnRangeMap();
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMapBefore = copy.getSingleUidToColumnRangeMap();
Map<Long, List<String>> partitionValuesMap = copy.getPartitionValuesMap();
for (String partitionName : partitionNames) {
if (!partitionNameToIdMapBefore.containsKey(partitionName)) {
LOG.info("dropPartitionsCache partitionName:[{}] not exist in table:[{}]", partitionName, tblName);
continue;
}
Long partitionId = partitionNameToIdMapBefore.remove(partitionName);
idToPartitionItemBefore.remove(partitionId);
partitionValuesMap.remove(partitionId);
List<UniqueId> uniqueIds = idToUniqueIdsMapBefore.remove(partitionId);
for (UniqueId uniqueId : uniqueIds) {
if (uidToPartitionRangeBefore != null) {
Range<PartitionKey> range = uidToPartitionRangeBefore.remove(uniqueId);
if (range != null) {
rangeToIdBefore.remove(range);
}
}
if (singleUidToColumnRangeMapBefore != null) {
Range<ColumnBound> range = singleUidToColumnRangeMapBefore.remove(uniqueId);
if (range != null) {
singleColumnRangeMapBefore.remove(range);
}
}
}
if (invalidPartitionCache) {
invalidatePartitionCache(dbName, tblName, partitionName);
}
}
HivePartitionValues partitionValuesCur = partitionValuesCache.getIfPresent(key);
if (partitionValuesCur == partitionValues) {
partitionValuesCache.put(key, copy);
}
}
public void putPartitionValuesCacheForTest(PartitionValueCacheKey key, HivePartitionValues values) {
partitionValuesCache.put(key, values);
}
/***
 * get the guava CacheLoader
 * if fileMetaCacheTtlSecond equals 0, the synchronous loader is used
 * if fileMetaCacheTtlSecond is greater than 0, the asynchronous loader is used
 * @param executor
 * @param fileMetaCacheTtlSecond
 * @return
 */
private CacheLoader<FileCacheKey, FileCacheValue> getGuavaCacheLoader(Executor executor,
int fileMetaCacheTtlSecond) {
CacheLoader<FileCacheKey, FileCacheValue> loader =
new CacheLoader<FileCacheKey, FileCacheValue>() {
@Override
public FileCacheValue load(FileCacheKey key) throws Exception {
return loadFiles(key);
}
};
if (fileMetaCacheTtlSecond == HMSExternalCatalog.FILE_META_CACHE_TTL_DISABLE_CACHE) {
return loader;
} else {
return CacheLoader.asyncReloading(loader, executor);
}
}
/***
* get fileCache ref
* @return
*/
public AtomicReference<LoadingCache<FileCacheKey, FileCacheValue>> getFileCacheRef() {
return fileCacheRef;
}
public List<FileCacheValue> getFilesByTransaction(List<HivePartition> partitions, ValidWriteIdList validWriteIds,
boolean isFullAcid) {
List<FileCacheValue> fileCacheValues = Lists.newArrayList();
String remoteUser = jobConf.get(HdfsResource.HADOOP_USER_NAME);
try {
for (HivePartition partition : partitions) {
FileCacheValue fileCacheValue = new FileCacheValue();
AcidUtils.Directory directory;
if (!Strings.isNullOrEmpty(remoteUser)) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(remoteUser);
directory = ugi.doAs((PrivilegedExceptionAction<AcidUtils.Directory>) () -> AcidUtils.getAcidState(
new Path(partition.getPath()), jobConf, validWriteIds, false, true));
} else {
directory = AcidUtils.getAcidState(new Path(partition.getPath()), jobConf, validWriteIds, false,
true);
}
if (!directory.getOriginalFiles().isEmpty()) {
throw new Exception("Original non-ACID files in transactional tables are not supported");
}
if (isFullAcid) {
int acidVersion = 2;
/**
* From Hive version >= 3.0, delta/base files will always have file '_orc_acid_version'
* with value >= '2'.
*/
Path baseOrDeltaPath = directory.getBaseDirectory() != null ? directory.getBaseDirectory() :
!directory.getCurrentDirectories().isEmpty() ? directory.getCurrentDirectories().get(0)
.getPath() : null;
String acidVersionPath = new Path(baseOrDeltaPath, "_orc_acid_version").toUri().toString();
RemoteFileSystem fs = FileSystemFactory.getByLocation(baseOrDeltaPath.toUri().toString(), jobConf);
Status status = fs.exists(acidVersionPath);
if (status != Status.OK) {
if (status.getErrCode() == ErrCode.NOT_FOUND) {
acidVersion = 0;
} else {
throw new Exception(String.format("Failed to check remote path {} exists.",
acidVersionPath));
}
}
if (acidVersion == 0 && !directory.getCurrentDirectories().isEmpty()) {
throw new Exception(
"Hive 2.x versioned full-acid tables need to run major compaction.");
}
}
List<DeleteDeltaInfo> deleteDeltas = new ArrayList<>();
for (AcidUtils.ParsedDelta delta : directory.getCurrentDirectories()) {
String location = delta.getPath().toString();
RemoteFileSystem fs = FileSystemFactory.getByLocation(location, jobConf);
RemoteFiles locatedFiles = fs.listLocatedFiles(location, true, false);
if (delta.isDeleteDelta()) {
List<String> deleteDeltaFileNames = locatedFiles.files().stream().map(f -> f.getName()).filter(
name -> name.startsWith(HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX))
.collect(Collectors.toList());
deleteDeltas.add(new DeleteDeltaInfo(location, deleteDeltaFileNames));
continue;
}
locatedFiles.files().stream().filter(
f -> f.getName().startsWith(HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX))
.forEach(fileCacheValue::addFile);
}
if (directory.getBaseDirectory() != null) {
String location = directory.getBaseDirectory().toString();
RemoteFileSystem fs = FileSystemFactory.getByLocation(location, jobConf);
RemoteFiles locatedFiles = fs.listLocatedFiles(location, true, false);
locatedFiles.files().stream().filter(
f -> f.getName().startsWith(HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX))
.forEach(fileCacheValue::addFile);
}
fileCacheValue.setAcidInfo(new AcidInfo(partition.getPath(), deleteDeltas));
fileCacheValues.add(fileCacheValue);
}
} catch (Exception e) {
throw new CacheException("failed to get input splits for write ids %s in catalog %s", e,
validWriteIds.toString(), catalog.getName());
}
return fileCacheValues;
}
/**
* The Key of hive partition value cache
*/
@Data
public static class PartitionValueCacheKey {
private String dbName;
private String tblName;
private List<Type> types;
public PartitionValueCacheKey(String dbName, String tblName, List<Type> types) {
this.dbName = dbName;
this.tblName = tblName;
this.types = types;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof PartitionValueCacheKey)) {
return false;
}
return dbName.equals(((PartitionValueCacheKey) obj).dbName)
&& tblName.equals(((PartitionValueCacheKey) obj).tblName);
}
@Override
public int hashCode() {
return Objects.hash(dbName, tblName);
}
@Override
public String toString() {
return "PartitionValueCacheKey{" + "dbName='" + dbName + '\'' + ", tblName='" + tblName + '\'' + '}';
}
}
@Data
public static class PartitionCacheKey {
private String dbName;
private String tblName;
private List<String> values;
public PartitionCacheKey(String dbName, String tblName, List<String> values) {
this.dbName = dbName;
this.tblName = tblName;
this.values = values;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof PartitionCacheKey)) {
return false;
}
return dbName.equals(((PartitionCacheKey) obj).dbName)
&& tblName.equals(((PartitionCacheKey) obj).tblName)
&& Objects.equals(values, ((PartitionCacheKey) obj).values);
}
@Override
public int hashCode() {
return Objects.hash(dbName, tblName, values);
}
@Override
public String toString() {
return "PartitionCacheKey{" + "dbName='" + dbName + '\'' + ", tblName='" + tblName + '\'' + ", values="
+ values + '}';
}
}
@Data
public static class FileCacheKey {
private String dummyKey;
private String location;
private String inputFormat;
private boolean useSelfSplitter;
protected List<String> partitionValues;
public FileCacheKey(String location, String inputFormat, List<String> partitionValues) {
this.location = location;
this.inputFormat = inputFormat;
this.partitionValues = partitionValues == null ? Lists.newArrayList() : partitionValues;
this.useSelfSplitter = true;
}
public static FileCacheKey createDummyCacheKey(String dbName, String tblName, String location,
String inputFormat, boolean useSelfSplitter) {
FileCacheKey fileCacheKey = new FileCacheKey(location, inputFormat, null);
fileCacheKey.dummyKey = dbName + "." + tblName;
fileCacheKey.useSelfSplitter = useSelfSplitter;
return fileCacheKey;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof FileCacheKey)) {
return false;
}
if (dummyKey != null) {
return dummyKey.equals(((FileCacheKey) obj).dummyKey);
}
return location.equals(((FileCacheKey) obj).location)
&& partitionValues.equals(((FileCacheKey) obj).partitionValues);
}
@Override
public int hashCode() {
if (dummyKey != null) {
return Objects.hash(dummyKey);
}
return Objects.hash(location, partitionValues);
}
@Override
public String toString() {
return "FileCacheKey{" + "location='" + location + '\'' + ", inputFormat='" + inputFormat + '\'' + '}';
}
}
@Data
public static class FileCacheValue {
private List<HiveFileStatus> files;
private List<Split> splits;
private boolean isSplittable;
protected List<String> partitionValues;
private AcidInfo acidInfo;
public void addFile(RemoteFile file) {
if (files == null) {
files = Lists.newArrayList();
}
HiveFileStatus status = new HiveFileStatus();
status.setBlockLocations(file.getBlockLocations());
status.setPath(file.getPath());
status.length = file.getSize();
status.blockSize = file.getBlockSize();
status.modificationTime = file.getModificationTime();
files.add(status);
}
public void addSplit(Split split) {
if (splits == null) {
splits = Lists.newArrayList();
}
splits.add(split);
}
public int getValuesSize() {
return partitionValues == null ? 0 : partitionValues.size();
}
public AcidInfo getAcidInfo() {
return acidInfo;
}
public void setAcidInfo(AcidInfo acidInfo) {
this.acidInfo = acidInfo;
}
}
@Data
public static class HiveFileStatus {
BlockLocation[] blockLocations;
Path path;
long length;
long blockSize;
long modificationTime;
}
@Data
public static class HivePartitionValues {
private long nextPartitionId;
private Map<String, Long> partitionNameToIdMap;
private Map<Long, List<UniqueId>> idToUniqueIdsMap;
private Map<Long, PartitionItem> idToPartitionItem;
private Map<Long, List<String>> partitionValuesMap;
private Map<UniqueId, Range<PartitionKey>> uidToPartitionRange;
private Map<Range<PartitionKey>, UniqueId> rangeToId;
private RangeMap<ColumnBound, UniqueId> singleColumnRangeMap;
private Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap;
public HivePartitionValues() {
}
public HivePartitionValues(Map<Long, PartitionItem> idToPartitionItem,
Map<UniqueId, Range<PartitionKey>> uidToPartitionRange,
Map<Range<PartitionKey>, UniqueId> rangeToId,
RangeMap<ColumnBound, UniqueId> singleColumnRangeMap,
long nextPartitionId,
Map<String, Long> partitionNameToIdMap,
Map<Long, List<UniqueId>> idToUniqueIdsMap,
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap,
Map<Long, List<String>> partitionValuesMap) {
this.idToPartitionItem = idToPartitionItem;
this.uidToPartitionRange = uidToPartitionRange;
this.rangeToId = rangeToId;
this.singleColumnRangeMap = singleColumnRangeMap;
this.nextPartitionId = nextPartitionId;
this.partitionNameToIdMap = partitionNameToIdMap;
this.idToUniqueIdsMap = idToUniqueIdsMap;
this.singleUidToColumnRangeMap = singleUidToColumnRangeMap;
this.partitionValuesMap = partitionValuesMap;
}
public HivePartitionValues copy() {
HivePartitionValues copy = new HivePartitionValues();
copy.setNextPartitionId(nextPartitionId);
copy.setPartitionNameToIdMap(partitionNameToIdMap == null ? null : Maps.newHashMap(partitionNameToIdMap));
copy.setIdToUniqueIdsMap(idToUniqueIdsMap == null ? null : Maps.newHashMap(idToUniqueIdsMap));
copy.setIdToPartitionItem(idToPartitionItem == null ? null : Maps.newHashMap(idToPartitionItem));
copy.setPartitionValuesMap(partitionValuesMap == null ? null : Maps.newHashMap(partitionValuesMap));
copy.setUidToPartitionRange(uidToPartitionRange == null ? null : Maps.newHashMap(uidToPartitionRange));
copy.setRangeToId(rangeToId == null ? null : Maps.newHashMap(rangeToId));
copy.setSingleUidToColumnRangeMap(
singleUidToColumnRangeMap == null ? null : Maps.newHashMap(singleUidToColumnRangeMap));
if (singleColumnRangeMap != null) {
RangeMap<ColumnBound, UniqueId> copySingleColumnRangeMap = TreeRangeMap.create();
copySingleColumnRangeMap.putAll(singleColumnRangeMap);
copy.setSingleColumnRangeMap(copySingleColumnRangeMap);
}
return copy;
}
}
}
|
class HiveMetaStoreCache {
private static final Logger LOG = LogManager.getLogger(HiveMetaStoreCache.class);
private static final int MIN_BATCH_FETCH_PARTITION_NUM = 50;
public static final String HIVE_DEFAULT_PARTITION = "__HIVE_DEFAULT_PARTITION__";
public static final String HIVE_ORC_ACID_VERSION_FILE = "_orc_acid_version";
private static final String HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX = "bucket_";
private HMSExternalCatalog catalog;
private JobConf jobConf;
private Executor executor;
private LoadingCache<PartitionValueCacheKey, HivePartitionValues> partitionValuesCache;
private LoadingCache<PartitionCacheKey, HivePartition> partitionCache;
private volatile AtomicReference<LoadingCache<FileCacheKey, FileCacheValue>> fileCacheRef
= new AtomicReference<>();
public HiveMetaStoreCache(HMSExternalCatalog catalog, Executor executor) {
this.catalog = catalog;
this.executor = executor;
init();
initMetrics();
}
private void init() {
partitionValuesCache = CacheBuilder.newBuilder().maximumSize(Config.max_hive_table_catch_num)
.expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES)
.build(CacheLoader.asyncReloading(
new CacheLoader<PartitionValueCacheKey, HivePartitionValues>() {
@Override
public HivePartitionValues load(PartitionValueCacheKey key) throws Exception {
return loadPartitionValues(key);
}
}, executor));
partitionCache = CacheBuilder.newBuilder().maximumSize(Config.max_hive_partition_cache_num)
.expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES)
.build(CacheLoader.asyncReloading(new CacheLoader<PartitionCacheKey, HivePartition>() {
@Override
public HivePartition load(PartitionCacheKey key) throws Exception {
return loadPartitions(key);
}
}, executor));
setNewFileCache();
}
/***
 * generate a new file cache and set it to fileCacheRef
*/
public void setNewFileCache() {
setJobConf();
int fileMetaCacheTtlSecond = NumberUtils.toInt(
(catalog.getProperties().get(HMSExternalCatalog.FILE_META_CACHE_TTL_SECOND)),
HMSExternalCatalog.FILE_META_CACHE_NO_TTL);
CacheBuilder<Object, Object> fileCacheBuilder = CacheBuilder.newBuilder()
.maximumSize(Config.max_external_file_cache_num)
.expireAfterAccess(Config.external_cache_expire_time_minutes_after_access, TimeUnit.MINUTES);
if (fileMetaCacheTtlSecond >= HMSExternalCatalog.FILE_META_CACHE_TTL_DISABLE_CACHE) {
fileCacheBuilder.expireAfterWrite(fileMetaCacheTtlSecond, TimeUnit.SECONDS);
}
CacheLoader<FileCacheKey, FileCacheValue> loader = getGuavaCacheLoader(executor,
fileMetaCacheTtlSecond);
LoadingCache<FileCacheKey, FileCacheValue> preFileCache = fileCacheRef.get();
fileCacheRef.set(fileCacheBuilder.build(loader));
if (Objects.nonNull(preFileCache)) {
preFileCache.invalidateAll();
}
}
private void initMetrics() {
GaugeMetric<Long> valueCacheGauge = new GaugeMetric<Long>("hive_meta_cache",
Metric.MetricUnit.NOUNIT, "hive partition value cache number") {
@Override
public Long getValue() {
return partitionValuesCache.size();
}
};
valueCacheGauge.addLabel(new MetricLabel("type", "partition_value"));
valueCacheGauge.addLabel(new MetricLabel("catalog", catalog.getName()));
MetricRepo.DORIS_METRIC_REGISTER.addMetrics(valueCacheGauge);
GaugeMetric<Long> partitionCacheGauge = new GaugeMetric<Long>("hive_meta_cache",
Metric.MetricUnit.NOUNIT, "hive partition cache number") {
@Override
public Long getValue() {
return partitionCache.size();
}
};
partitionCacheGauge.addLabel(new MetricLabel("type", "partition"));
partitionCacheGauge.addLabel(new MetricLabel("catalog", catalog.getName()));
MetricRepo.DORIS_METRIC_REGISTER.addMetrics(partitionCacheGauge);
GaugeMetric<Long> fileCacheGauge = new GaugeMetric<Long>("hive_meta_cache",
Metric.MetricUnit.NOUNIT, "hive file cache number") {
@Override
public Long getValue() {
return fileCacheRef.get().size();
}
};
fileCacheGauge.addLabel(new MetricLabel("type", "file"));
fileCacheGauge.addLabel(new MetricLabel("catalog", catalog.getName()));
MetricRepo.DORIS_METRIC_REGISTER.addMetrics(fileCacheGauge);
}
private HivePartitionValues loadPartitionValues(PartitionValueCacheKey key) {
List<String> partitionNames = catalog.getClient().listPartitionNames(key.dbName, key.tblName);
if (LOG.isDebugEnabled()) {
LOG.debug("load
}
Map<Long, PartitionItem> idToPartitionItem = Maps.newHashMapWithExpectedSize(partitionNames.size());
Map<String, Long> partitionNameToIdMap = Maps.newHashMapWithExpectedSize(partitionNames.size());
Map<Long, List<UniqueId>> idToUniqueIdsMap = Maps.newHashMapWithExpectedSize(partitionNames.size());
long idx = 0;
for (String partitionName : partitionNames) {
long partitionId = idx++;
ListPartitionItem listPartitionItem = toListPartitionItem(partitionName, key.types);
idToPartitionItem.put(partitionId, listPartitionItem);
partitionNameToIdMap.put(partitionName, partitionId);
}
Map<UniqueId, Range<PartitionKey>> uidToPartitionRange = null;
Map<Range<PartitionKey>, UniqueId> rangeToId = null;
RangeMap<ColumnBound, UniqueId> singleColumnRangeMap = null;
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap = null;
if (key.types.size() > 1) {
uidToPartitionRange = ListPartitionPrunerV2.genUidToPartitionRange(idToPartitionItem, idToUniqueIdsMap);
rangeToId = ListPartitionPrunerV2.genRangeToId(uidToPartitionRange);
} else {
Preconditions.checkState(key.types.size() == 1, key.types);
singleColumnRangeMap = ListPartitionPrunerV2.genSingleColumnRangeMap(idToPartitionItem, idToUniqueIdsMap);
singleUidToColumnRangeMap = ListPartitionPrunerV2.genSingleUidToColumnRange(singleColumnRangeMap);
}
Map<Long, List<String>> partitionValuesMap = ListPartitionPrunerV2.getPartitionValuesMap(idToPartitionItem);
return new HivePartitionValues(idToPartitionItem, uidToPartitionRange, rangeToId, singleColumnRangeMap, idx,
partitionNameToIdMap, idToUniqueIdsMap, singleUidToColumnRangeMap, partitionValuesMap);
}
public ListPartitionItem toListPartitionItem(String partitionName, List<Type> types) {
String[] parts = partitionName.split("/");
Preconditions.checkState(parts.length == types.size(), partitionName + " vs. " + types);
List<PartitionValue> values = Lists.newArrayListWithExpectedSize(types.size());
for (String part : parts) {
String[] kv = part.split("=");
Preconditions.checkState(kv.length == 2, partitionName);
String partitionValue;
try {
partitionValue = URLDecoder.decode(kv[1], StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
values.add(new PartitionValue(partitionValue, HIVE_DEFAULT_PARTITION.equals(partitionValue)));
}
try {
PartitionKey key = PartitionKey.createListPartitionKeyWithTypes(values, types);
return new ListPartitionItem(Lists.newArrayList(key));
} catch (AnalysisException e) {
throw new CacheException("failed to convert hive partition %s to list partition in catalog %s",
e, partitionName, catalog.getName());
}
}
private HivePartition loadPartitions(PartitionCacheKey key) {
Partition partition = catalog.getClient().getPartition(key.dbName, key.tblName, key.values);
StorageDescriptor sd = partition.getSd();
if (LOG.isDebugEnabled()) {
LOG.debug("load partition format: {}, location: {} for {} in catalog {}",
sd.getInputFormat(), sd.getLocation(), key, catalog.getName());
}
return new HivePartition(key.dbName, key.tblName, false, sd.getInputFormat(), sd.getLocation(), key.values);
}
private FileCacheValue getFileCache(String location, InputFormat<?, ?> inputFormat,
JobConf jobConf,
List<String> partitionValues) throws UserException {
FileCacheValue result = new FileCacheValue();
result.setSplittable(HiveUtil.isSplittable(inputFormat, new Path(location), jobConf));
RemoteFileSystem fs = FileSystemFactory.getByLocation(location, jobConf);
try {
RemoteFiles locatedFiles = fs.listLocatedFiles(location, true, true);
locatedFiles.files().forEach(result::addFile);
} catch (Exception e) {
if (e.getCause() instanceof FileNotFoundException) {
LOG.warn(String.format("File %s not exist.", location));
} else {
throw e;
}
}
result.setPartitionValues(partitionValues);
return result;
}
private synchronized void setJobConf() {
Configuration configuration = new HdfsConfiguration();
for (Map.Entry<String, String> entry : catalog.getCatalogProperty().getHadoopProperties().entrySet()) {
configuration.set(entry.getKey(), entry.getValue());
}
jobConf = new JobConf(configuration);
jobConf.set("mapreduce.input.fileinputformat.input.dir.recursive", "true");
jobConf.set("fs.hdfs.impl.disable.cache", "true");
jobConf.set("fs.file.impl.disable.cache", "true");
}
private synchronized void updateJobConf(String key, String value) {
jobConf.set(key, value);
}
public HivePartitionValues getPartitionValues(String dbName, String tblName, List<Type> types) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, types);
return getPartitionValues(key);
}
public HivePartitionValues getPartitionValues(PartitionValueCacheKey key) {
try {
return partitionValuesCache.get(key);
} catch (ExecutionException e) {
throw new CacheException("failed to get partition values for %s in catalog %s", e, key, catalog.getName());
}
}
public List<FileCacheValue> getFilesByPartitions(List<HivePartition> partitions, boolean useSelfSplitter) {
long start = System.currentTimeMillis();
List<FileCacheKey> keys = Lists.newArrayListWithExpectedSize(partitions.size());
partitions.stream().forEach(p -> {
FileCacheKey fileCacheKey = p.isDummyPartition()
? FileCacheKey.createDummyCacheKey(p.getDbName(), p.getTblName(), p.getPath(),
p.getInputFormat(), useSelfSplitter)
: new FileCacheKey(p.getPath(), p.getInputFormat(), p.getPartitionValues());
fileCacheKey.setUseSelfSplitter(useSelfSplitter);
keys.add(fileCacheKey);
});
Stream<FileCacheKey> stream;
if (partitions.size() < MIN_BATCH_FETCH_PARTITION_NUM) {
stream = keys.stream();
} else {
stream = keys.parallelStream();
}
List<FileCacheValue> fileLists = stream.map(k -> {
try {
FileCacheValue fileCacheValue = fileCacheRef.get().get(k);
for (int i = 0; i < fileCacheValue.getValuesSize(); i++) {
if (HIVE_DEFAULT_PARTITION.equals(fileCacheValue.getPartitionValues().get(i))) {
fileCacheValue.getPartitionValues().set(i, FeConstants.null_string);
}
}
return fileCacheValue;
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
}).collect(Collectors.toList());
LOG.debug("get
fileLists.stream().mapToInt(l -> l.getFiles() == null
? (l.getSplits() == null ? 0 : l.getSplits().size()) : l.getFiles().size()).sum(),
partitions.size(), catalog.getName(), (System.currentTimeMillis() - start));
return fileLists;
}
public List<HivePartition> getAllPartitions(String dbName, String name, List<List<String>> partitionValuesList) {
long start = System.currentTimeMillis();
List<PartitionCacheKey> keys = Lists.newArrayListWithExpectedSize(partitionValuesList.size());
partitionValuesList.stream().forEach(p -> keys.add(new PartitionCacheKey(dbName, name, p)));
Stream<PartitionCacheKey> stream;
if (partitionValuesList.size() < MIN_BATCH_FETCH_PARTITION_NUM) {
stream = keys.stream();
} else {
stream = keys.parallelStream();
}
List<HivePartition> partitions = stream.map(k -> {
try {
return partitionCache.get(k);
} catch (ExecutionException e) {
throw new CacheException("failed to get partition for %s in catalog %s", e, k, catalog.getName());
}
}).collect(Collectors.toList());
LOG.debug("get
(System.currentTimeMillis() - start));
return partitions;
}
public void invalidateTableCache(String dbName, String tblName) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, null);
HivePartitionValues partitionValues = partitionValuesCache.getIfPresent(key);
if (partitionValues != null) {
long start = System.currentTimeMillis();
for (List<String> values : partitionValues.partitionValuesMap.values()) {
PartitionCacheKey partKey = new PartitionCacheKey(dbName, tblName, values);
HivePartition partition = partitionCache.getIfPresent(partKey);
if (partition != null) {
fileCacheRef.get().invalidate(new FileCacheKey(partition.getPath(),
null, partition.getPartitionValues()));
partitionCache.invalidate(partKey);
}
}
partitionValuesCache.invalidate(key);
LOG.debug("invalid table cache for {}.{} in catalog {}, cache num: {}, cost: {} ms",
dbName, tblName, catalog.getName(), partitionValues.partitionValuesMap.size(),
(System.currentTimeMillis() - start));
} else {
/**
 * A file cache entry can be created with reference to
 * {@link org.apache.doris.planner.external.HiveSplitter},
 * so we need to invalidate it if this is a non-partitioned table.
 * We use a dummy {@link org.apache.doris.datasource.hive.HiveMetaStoreCache.FileCacheKey}
 * to avoid invoking the HMS client, because this method may be invoked when a slave FE replays journal logs,
 * and the FE will exit if network problems occur.
 */
FileCacheKey fileCacheKey = FileCacheKey.createDummyCacheKey(
dbName, tblName, null, null, false);
fileCacheRef.get().invalidate(fileCacheKey);
}
}
public void invalidatePartitionCache(String dbName, String tblName, String partitionName) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, null);
HivePartitionValues partitionValues = partitionValuesCache.getIfPresent(key);
if (partitionValues != null) {
Long partitionId = partitionValues.partitionNameToIdMap.get(partitionName);
List<String> values = partitionValues.partitionValuesMap.get(partitionId);
PartitionCacheKey partKey = new PartitionCacheKey(dbName, tblName, values);
HivePartition partition = partitionCache.getIfPresent(partKey);
if (partition != null) {
fileCacheRef.get().invalidate(new FileCacheKey(partition.getPath(),
null, partition.getPartitionValues()));
partitionCache.invalidate(partKey);
}
}
}
public void invalidateDbCache(String dbName) {
long start = System.currentTimeMillis();
Set<PartitionValueCacheKey> keys = partitionValuesCache.asMap().keySet();
for (PartitionValueCacheKey key : keys) {
if (key.dbName.equals(dbName)) {
invalidateTableCache(dbName, key.tblName);
}
}
LOG.debug("invalid db cache for {} in catalog {}, cache num: {}, cost: {} ms", dbName, catalog.getName(),
keys.size(), (System.currentTimeMillis() - start));
}
public void invalidateAll() {
partitionValuesCache.invalidateAll();
partitionCache.invalidateAll();
fileCacheRef.get().invalidateAll();
LOG.debug("invalid all meta cache in catalog {}", catalog.getName());
}
public void addPartitionsCache(String dbName, String tblName, List<String> partitionNames,
List<Type> partitionColumnTypes) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, partitionColumnTypes);
HivePartitionValues partitionValues = partitionValuesCache.getIfPresent(key);
if (partitionValues == null) {
return;
}
HivePartitionValues copy = partitionValues.copy();
Map<Long, PartitionItem> idToPartitionItemBefore = copy.getIdToPartitionItem();
Map<String, Long> partitionNameToIdMapBefore = copy.getPartitionNameToIdMap();
Map<Long, List<UniqueId>> idToUniqueIdsMap = copy.getIdToUniqueIdsMap();
Map<Long, PartitionItem> idToPartitionItem = new HashMap<>();
long idx = copy.getNextPartitionId();
for (String partitionName : partitionNames) {
if (partitionNameToIdMapBefore.containsKey(partitionName)) {
LOG.info("addPartitionsCache partitionName:[{}] has exist in table:[{}]", partitionName, tblName);
continue;
}
long partitionId = idx++;
ListPartitionItem listPartitionItem = toListPartitionItem(partitionName, key.types);
idToPartitionItemBefore.put(partitionId, listPartitionItem);
idToPartitionItem.put(partitionId, listPartitionItem);
partitionNameToIdMapBefore.put(partitionName, partitionId);
}
Map<Long, List<String>> partitionValuesMapBefore = copy.getPartitionValuesMap();
Map<Long, List<String>> partitionValuesMap = ListPartitionPrunerV2.getPartitionValuesMap(idToPartitionItem);
partitionValuesMapBefore.putAll(partitionValuesMap);
copy.setNextPartitionId(idx);
if (key.types.size() > 1) {
Map<UniqueId, Range<PartitionKey>> uidToPartitionRangeBefore = copy.getUidToPartitionRange();
Map<UniqueId, Range<PartitionKey>> uidToPartitionRange = ListPartitionPrunerV2
.genUidToPartitionRange(idToPartitionItem, idToUniqueIdsMap);
uidToPartitionRangeBefore.putAll(uidToPartitionRange);
Map<Range<PartitionKey>, UniqueId> rangeToIdBefore = copy.getRangeToId();
Map<Range<PartitionKey>, UniqueId> rangeToId = ListPartitionPrunerV2.genRangeToId(uidToPartitionRange);
rangeToIdBefore.putAll(rangeToId);
} else {
Preconditions.checkState(key.types.size() == 1, key.types);
RangeMap<ColumnBound, UniqueId> singleColumnRangeMapBefore = copy.getSingleColumnRangeMap();
RangeMap<ColumnBound, UniqueId> singleColumnRangeMap = ListPartitionPrunerV2
.genSingleColumnRangeMap(idToPartitionItem, idToUniqueIdsMap);
singleColumnRangeMapBefore.putAll(singleColumnRangeMap);
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMapBefore = copy
.getSingleUidToColumnRangeMap();
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap = ListPartitionPrunerV2
.genSingleUidToColumnRange(singleColumnRangeMap);
singleUidToColumnRangeMapBefore.putAll(singleUidToColumnRangeMap);
}
HivePartitionValues partitionValuesCur = partitionValuesCache.getIfPresent(key);
if (partitionValuesCur == partitionValues) {
partitionValuesCache.put(key, copy);
}
}
public void dropPartitionsCache(String dbName, String tblName, List<String> partitionNames,
boolean invalidPartitionCache) {
PartitionValueCacheKey key = new PartitionValueCacheKey(dbName, tblName, null);
HivePartitionValues partitionValues = partitionValuesCache.getIfPresent(key);
if (partitionValues == null) {
return;
}
HivePartitionValues copy = partitionValues.copy();
Map<String, Long> partitionNameToIdMapBefore = copy.getPartitionNameToIdMap();
Map<Long, PartitionItem> idToPartitionItemBefore = copy.getIdToPartitionItem();
Map<Long, List<UniqueId>> idToUniqueIdsMapBefore = copy.getIdToUniqueIdsMap();
Map<UniqueId, Range<PartitionKey>> uidToPartitionRangeBefore = copy.getUidToPartitionRange();
Map<Range<PartitionKey>, UniqueId> rangeToIdBefore = copy.getRangeToId();
RangeMap<ColumnBound, UniqueId> singleColumnRangeMapBefore = copy.getSingleColumnRangeMap();
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMapBefore = copy.getSingleUidToColumnRangeMap();
Map<Long, List<String>> partitionValuesMap = copy.getPartitionValuesMap();
for (String partitionName : partitionNames) {
if (!partitionNameToIdMapBefore.containsKey(partitionName)) {
LOG.info("dropPartitionsCache partitionName:[{}] not exist in table:[{}]", partitionName, tblName);
continue;
}
Long partitionId = partitionNameToIdMapBefore.remove(partitionName);
idToPartitionItemBefore.remove(partitionId);
partitionValuesMap.remove(partitionId);
List<UniqueId> uniqueIds = idToUniqueIdsMapBefore.remove(partitionId);
for (UniqueId uniqueId : uniqueIds) {
if (uidToPartitionRangeBefore != null) {
Range<PartitionKey> range = uidToPartitionRangeBefore.remove(uniqueId);
if (range != null) {
rangeToIdBefore.remove(range);
}
}
if (singleUidToColumnRangeMapBefore != null) {
Range<ColumnBound> range = singleUidToColumnRangeMapBefore.remove(uniqueId);
if (range != null) {
singleColumnRangeMapBefore.remove(range);
}
}
}
if (invalidPartitionCache) {
invalidatePartitionCache(dbName, tblName, partitionName);
}
}
HivePartitionValues partitionValuesCur = partitionValuesCache.getIfPresent(key);
if (partitionValuesCur == partitionValues) {
partitionValuesCache.put(key, copy);
}
}
public void putPartitionValuesCacheForTest(PartitionValueCacheKey key, HivePartitionValues values) {
partitionValuesCache.put(key, values);
}
/**
 * Get the Guava CacheLoader.
 * If fileMetaCacheTtlSecond equals 0, the synchronous loader is used;
 * if fileMetaCacheTtlSecond is greater than 0, the asynchronous loader is used.
 * @param executor
 * @param fileMetaCacheTtlSecond
 * @return
 */
private CacheLoader<FileCacheKey, FileCacheValue> getGuavaCacheLoader(Executor executor,
int fileMetaCacheTtlSecond) {
CacheLoader<FileCacheKey, FileCacheValue> loader =
new CacheLoader<FileCacheKey, FileCacheValue>() {
@Override
public FileCacheValue load(FileCacheKey key) throws Exception {
return loadFiles(key);
}
};
if (fileMetaCacheTtlSecond == HMSExternalCatalog.FILE_META_CACHE_TTL_DISABLE_CACHE) {
return loader;
} else {
return CacheLoader.asyncReloading(loader, executor);
}
}
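// Usage sketch (illustrative only, not code from this class): the returned loader
// is typically installed into a Guava cache along these lines, where the size
// limit is a made-up value:
//   LoadingCache<FileCacheKey, FileCacheValue> cache = CacheBuilder.newBuilder()
//           .maximumSize(10000)
//           .refreshAfterWrite(fileMetaCacheTtlSecond, TimeUnit.SECONDS)
//           .build(getGuavaCacheLoader(executor, fileMetaCacheTtlSecond));
// With CacheLoader.asyncReloading, refreshes triggered by refreshAfterWrite run on
// the executor while readers keep seeing the previous value until the reload finishes.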
/**
 * Get the fileCache reference.
 * @return
 */
public AtomicReference<LoadingCache<FileCacheKey, FileCacheValue>> getFileCacheRef() {
return fileCacheRef;
}
public List<FileCacheValue> getFilesByTransaction(List<HivePartition> partitions, ValidWriteIdList validWriteIds,
boolean isFullAcid) {
List<FileCacheValue> fileCacheValues = Lists.newArrayList();
String remoteUser = jobConf.get(HdfsResource.HADOOP_USER_NAME);
try {
for (HivePartition partition : partitions) {
FileCacheValue fileCacheValue = new FileCacheValue();
AcidUtils.Directory directory;
if (!Strings.isNullOrEmpty(remoteUser)) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(remoteUser);
directory = ugi.doAs((PrivilegedExceptionAction<AcidUtils.Directory>) () -> AcidUtils.getAcidState(
new Path(partition.getPath()), jobConf, validWriteIds, false, true));
} else {
directory = AcidUtils.getAcidState(new Path(partition.getPath()), jobConf, validWriteIds, false,
true);
}
if (!directory.getOriginalFiles().isEmpty()) {
throw new Exception("Original non-ACID files in transactional tables are not supported");
}
if (isFullAcid) {
int acidVersion = 2;
/**
* From Hive version >= 3.0, delta/base files will always have file '_orc_acid_version'
* with value >= '2'.
*/
Path baseOrDeltaPath = directory.getBaseDirectory() != null ? directory.getBaseDirectory() :
!directory.getCurrentDirectories().isEmpty() ? directory.getCurrentDirectories().get(0)
.getPath() : null;
String acidVersionPath = new Path(baseOrDeltaPath, "_orc_acid_version").toUri().toString();
RemoteFileSystem fs = FileSystemFactory.getByLocation(baseOrDeltaPath.toUri().toString(), jobConf);
Status status = fs.exists(acidVersionPath);
if (status != Status.OK) {
if (status.getErrCode() == ErrCode.NOT_FOUND) {
acidVersion = 0;
} else {
throw new Exception(String.format("Failed to check remote path {} exists.",
acidVersionPath));
}
}
if (acidVersion == 0 && !directory.getCurrentDirectories().isEmpty()) {
throw new Exception(
"Hive 2.x versioned full-acid tables need to run major compaction.");
}
}
List<DeleteDeltaInfo> deleteDeltas = new ArrayList<>();
for (AcidUtils.ParsedDelta delta : directory.getCurrentDirectories()) {
String location = delta.getPath().toString();
RemoteFileSystem fs = FileSystemFactory.getByLocation(location, jobConf);
RemoteFiles locatedFiles = fs.listLocatedFiles(location, true, false);
if (delta.isDeleteDelta()) {
List<String> deleteDeltaFileNames = locatedFiles.files().stream().map(f -> f.getName()).filter(
name -> name.startsWith(HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX))
.collect(Collectors.toList());
deleteDeltas.add(new DeleteDeltaInfo(location, deleteDeltaFileNames));
continue;
}
locatedFiles.files().stream().filter(
f -> f.getName().startsWith(HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX))
.forEach(fileCacheValue::addFile);
}
if (directory.getBaseDirectory() != null) {
String location = directory.getBaseDirectory().toString();
RemoteFileSystem fs = FileSystemFactory.getByLocation(location, jobConf);
RemoteFiles locatedFiles = fs.listLocatedFiles(location, true, false);
locatedFiles.files().stream().filter(
f -> f.getName().startsWith(HIVE_TRANSACTIONAL_ORC_BUCKET_PREFIX))
.forEach(fileCacheValue::addFile);
}
fileCacheValue.setAcidInfo(new AcidInfo(partition.getPath(), deleteDeltas));
fileCacheValues.add(fileCacheValue);
}
} catch (Exception e) {
throw new CacheException("failed to get input splits for write ids %s in catalog %s", e,
validWriteIds.toString(), catalog.getName());
}
return fileCacheValues;
}
/**
* The Key of hive partition value cache
*/
@Data
public static class PartitionValueCacheKey {
private String dbName;
private String tblName;
private List<Type> types;
public PartitionValueCacheKey(String dbName, String tblName, List<Type> types) {
this.dbName = dbName;
this.tblName = tblName;
this.types = types;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof PartitionValueCacheKey)) {
return false;
}
return dbName.equals(((PartitionValueCacheKey) obj).dbName)
&& tblName.equals(((PartitionValueCacheKey) obj).tblName);
}
@Override
public int hashCode() {
return Objects.hash(dbName, tblName);
}
@Override
public String toString() {
return "PartitionValueCacheKey{" + "dbName='" + dbName + '\'' + ", tblName='" + tblName + '\'' + '}';
}
}
@Data
public static class PartitionCacheKey {
private String dbName;
private String tblName;
private List<String> values;
public PartitionCacheKey(String dbName, String tblName, List<String> values) {
this.dbName = dbName;
this.tblName = tblName;
this.values = values;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof PartitionCacheKey)) {
return false;
}
return dbName.equals(((PartitionCacheKey) obj).dbName)
&& tblName.equals(((PartitionCacheKey) obj).tblName)
&& Objects.equals(values, ((PartitionCacheKey) obj).values);
}
@Override
public int hashCode() {
return Objects.hash(dbName, tblName, values);
}
@Override
public String toString() {
return "PartitionCacheKey{" + "dbName='" + dbName + '\'' + ", tblName='" + tblName + '\'' + ", values="
+ values + '}';
}
}
@Data
public static class FileCacheKey {
private String dummyKey;
private String location;
private String inputFormat;
private boolean useSelfSplitter;
protected List<String> partitionValues;
public FileCacheKey(String location, String inputFormat, List<String> partitionValues) {
this.location = location;
this.inputFormat = inputFormat;
this.partitionValues = partitionValues == null ? Lists.newArrayList() : partitionValues;
this.useSelfSplitter = true;
}
public static FileCacheKey createDummyCacheKey(String dbName, String tblName, String location,
String inputFormat, boolean useSelfSplitter) {
FileCacheKey fileCacheKey = new FileCacheKey(location, inputFormat, null);
fileCacheKey.dummyKey = dbName + "." + tblName;
fileCacheKey.useSelfSplitter = useSelfSplitter;
return fileCacheKey;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof FileCacheKey)) {
return false;
}
if (dummyKey != null) {
return dummyKey.equals(((FileCacheKey) obj).dummyKey);
}
return location.equals(((FileCacheKey) obj).location)
&& partitionValues.equals(((FileCacheKey) obj).partitionValues);
}
@Override
public int hashCode() {
if (dummyKey != null) {
return Objects.hash(dummyKey);
}
return Objects.hash(location, partitionValues);
}
@Override
public String toString() {
return "FileCacheKey{" + "location='" + location + '\'' + ", inputFormat='" + inputFormat + '\'' + '}';
}
}
@Data
public static class FileCacheValue {
private List<HiveFileStatus> files;
private List<Split> splits;
private boolean isSplittable;
protected List<String> partitionValues;
private AcidInfo acidInfo;
public void addFile(RemoteFile file) {
if (files == null) {
files = Lists.newArrayList();
}
HiveFileStatus status = new HiveFileStatus();
status.setBlockLocations(file.getBlockLocations());
status.setPath(file.getPath());
status.length = file.getSize();
status.blockSize = file.getBlockSize();
status.modificationTime = file.getModificationTime();
files.add(status);
}
public void addSplit(Split split) {
if (splits == null) {
splits = Lists.newArrayList();
}
splits.add(split);
}
public int getValuesSize() {
return partitionValues == null ? 0 : partitionValues.size();
}
public AcidInfo getAcidInfo() {
return acidInfo;
}
public void setAcidInfo(AcidInfo acidInfo) {
this.acidInfo = acidInfo;
}
}
@Data
public static class HiveFileStatus {
BlockLocation[] blockLocations;
Path path;
long length;
long blockSize;
long modificationTime;
}
@Data
public static class HivePartitionValues {
private long nextPartitionId;
private Map<String, Long> partitionNameToIdMap;
private Map<Long, List<UniqueId>> idToUniqueIdsMap;
private Map<Long, PartitionItem> idToPartitionItem;
private Map<Long, List<String>> partitionValuesMap;
private Map<UniqueId, Range<PartitionKey>> uidToPartitionRange;
private Map<Range<PartitionKey>, UniqueId> rangeToId;
private RangeMap<ColumnBound, UniqueId> singleColumnRangeMap;
private Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap;
public HivePartitionValues() {
}
public HivePartitionValues(Map<Long, PartitionItem> idToPartitionItem,
Map<UniqueId, Range<PartitionKey>> uidToPartitionRange,
Map<Range<PartitionKey>, UniqueId> rangeToId,
RangeMap<ColumnBound, UniqueId> singleColumnRangeMap,
long nextPartitionId,
Map<String, Long> partitionNameToIdMap,
Map<Long, List<UniqueId>> idToUniqueIdsMap,
Map<UniqueId, Range<ColumnBound>> singleUidToColumnRangeMap,
Map<Long, List<String>> partitionValuesMap) {
this.idToPartitionItem = idToPartitionItem;
this.uidToPartitionRange = uidToPartitionRange;
this.rangeToId = rangeToId;
this.singleColumnRangeMap = singleColumnRangeMap;
this.nextPartitionId = nextPartitionId;
this.partitionNameToIdMap = partitionNameToIdMap;
this.idToUniqueIdsMap = idToUniqueIdsMap;
this.singleUidToColumnRangeMap = singleUidToColumnRangeMap;
this.partitionValuesMap = partitionValuesMap;
}
public HivePartitionValues copy() {
HivePartitionValues copy = new HivePartitionValues();
copy.setNextPartitionId(nextPartitionId);
copy.setPartitionNameToIdMap(partitionNameToIdMap == null ? null : Maps.newHashMap(partitionNameToIdMap));
copy.setIdToUniqueIdsMap(idToUniqueIdsMap == null ? null : Maps.newHashMap(idToUniqueIdsMap));
copy.setIdToPartitionItem(idToPartitionItem == null ? null : Maps.newHashMap(idToPartitionItem));
copy.setPartitionValuesMap(partitionValuesMap == null ? null : Maps.newHashMap(partitionValuesMap));
copy.setUidToPartitionRange(uidToPartitionRange == null ? null : Maps.newHashMap(uidToPartitionRange));
copy.setRangeToId(rangeToId == null ? null : Maps.newHashMap(rangeToId));
copy.setSingleUidToColumnRangeMap(
singleUidToColumnRangeMap == null ? null : Maps.newHashMap(singleUidToColumnRangeMap));
if (singleColumnRangeMap != null) {
RangeMap<ColumnBound, UniqueId> copySingleColumnRangeMap = TreeRangeMap.create();
copySingleColumnRangeMap.putAll(singleColumnRangeMap);
copy.setSingleColumnRangeMap(copySingleColumnRangeMap);
}
return copy;
}
}
}
|
Maybe this whole block could be simplified via
```
for (PartitionTrackerEntry<JobID, TaskExecutorPartitionInfo> partitionTrackerEntry : partitionTrackerEntries) {
    final TaskExecutorPartitionInfo metaInfo = partitionTrackerEntry.getMetaInfo();
    clusterPartitions.computeIfAbsent(
            metaInfo.getIntermediateDataSetId(),
            ignored -> new PartitionEntry(metaInfo.getNumberOfPartitions()))
        .addPartition(partitionTrackerEntry.getResultPartitionId());
}
```
If doing computations with side effects I would choose explicit loops instead of the stream API. For the stream API, computations should be side effect free.
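As a standalone illustration of why `computeIfAbsent` reads better here (placeholder types instead of the Flink ones, so the sketch compiles on its own):
```
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Minimal sketch of the computeIfAbsent grouping pattern suggested above.
// String/Integer stand in for IntermediateDataSetID/ResultPartitionID.
public class ComputeIfAbsentSketch {
    public static void main(String[] args) {
        Map<String, List<Integer>> partitionsByDataSet = new HashMap<>();
        int[][] entries = {{1, 10}, {1, 11}, {2, 20}};
        for (int[] entry : entries) {
            String dataSetId = "dataset-" + entry[0];
            // The mapping function only creates the container; the mutation
            // happens outside it, keeping the lambda side effect free.
            partitionsByDataSet
                    .computeIfAbsent(dataSetId, ignored -> new ArrayList<>())
                    .add(entry[1]);
        }
        System.out.println(partitionsByDataSet); // {dataset-1=[10, 11], dataset-2=[20]}
    }
}
```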
|
public void promoteJobPartitions(Collection<ResultPartitionID> partitionsToPromote) {
if (partitionsToPromote.isEmpty()) {
return;
}
final Collection<PartitionTrackerEntry<JobID, TaskExecutorPartitionInfo>> partitionTrackerEntries = stopTrackingPartitions(partitionsToPromote);
partitionTrackerEntries.forEach(
partitionTrackerEntry -> clusterPartitions.compute(partitionTrackerEntry.getMetaInfo().getIntermediateDataSetId(), (key, existingEntry) -> {
if (existingEntry == null) {
final Set<ResultPartitionID> newSet = new HashSet<>();
newSet.add(partitionTrackerEntry.getResultPartitionId());
return new PartitionEntry(newSet, partitionTrackerEntry.getMetaInfo().getNumberOfPartitions());
} else {
existingEntry.addPartition(partitionTrackerEntry.getResultPartitionId());
return existingEntry;
}
})
);
}
|
);
|
public void promoteJobPartitions(Collection<ResultPartitionID> partitionsToPromote) {
if (partitionsToPromote.isEmpty()) {
return;
}
final Collection<PartitionTrackerEntry<JobID, TaskExecutorPartitionInfo>> partitionTrackerEntries = stopTrackingPartitions(partitionsToPromote);
for (PartitionTrackerEntry<JobID, TaskExecutorPartitionInfo> partitionTrackerEntry : partitionTrackerEntries) {
final TaskExecutorPartitionInfo dataSetMetaInfo = partitionTrackerEntry.getMetaInfo();
final DataSetEntry dataSetEntry = clusterPartitions.computeIfAbsent(
dataSetMetaInfo.getIntermediateDataSetId(),
ignored -> new DataSetEntry(dataSetMetaInfo.getNumberOfPartitions()));
dataSetEntry.addPartition(partitionTrackerEntry.getResultPartitionId());
}
}
|
class TaskExecutorPartitionTrackerImpl extends AbstractPartitionTracker<JobID, TaskExecutorPartitionInfo> implements TaskExecutorPartitionTracker {
private final Map<IntermediateDataSetID, PartitionEntry> clusterPartitions = new HashMap<>();
private final ShuffleEnvironment<?, ?> shuffleEnvironment;
public TaskExecutorPartitionTrackerImpl(ShuffleEnvironment<?, ?> shuffleEnvironment) {
this.shuffleEnvironment = shuffleEnvironment;
}
@Override
public void startTrackingPartition(JobID producingJobId, TaskExecutorPartitionInfo partitionInfo) {
Preconditions.checkNotNull(producingJobId);
Preconditions.checkNotNull(partitionInfo);
startTrackingPartition(producingJobId, partitionInfo.getResultPartitionId(), partitionInfo);
}
@Override
public void stopTrackingAndReleaseJobPartitions(Collection<ResultPartitionID> partitionsToRelease) {
if (partitionsToRelease.isEmpty()) {
return;
}
stopTrackingPartitions(partitionsToRelease);
shuffleEnvironment.releasePartitionsLocally(partitionsToRelease);
}
@Override
public void stopTrackingAndReleaseJobPartitionsFor(JobID producingJobId) {
Collection<ResultPartitionID> partitionsForJob = CollectionUtil.project(
stopTrackingPartitionsFor(producingJobId),
PartitionTrackerEntry::getResultPartitionId);
shuffleEnvironment.releasePartitionsLocally(partitionsForJob);
}
@Override
public void stopTrackingAndReleaseAllClusterPartitions() {
clusterPartitions.values().stream().map(PartitionEntry::getPartitionIds).forEach(shuffleEnvironment::releasePartitionsLocally);
clusterPartitions.clear();
}
@Override
public ClusterPartitionReport createClusterPartitionReport() {
List<ClusterPartitionReport.ClusterPartitionReportEntry> reportEntries = clusterPartitions.entrySet().stream().map(entry ->
new ClusterPartitionReport.ClusterPartitionReportEntry(
entry.getKey(),
entry.getValue().getPartitionIds(),
entry.getValue().getTotalNumberOfPartitions()))
.collect(Collectors.toList());
return new ClusterPartitionReport(reportEntries);
}
private static class PartitionEntry {
private final Set<ResultPartitionID> partitionIds;
private final int totalNumberOfPartitions;
private PartitionEntry(Set<ResultPartitionID> partitionIds, int totalNumberOfPartitions) {
this.partitionIds = partitionIds;
this.totalNumberOfPartitions = totalNumberOfPartitions;
}
void addPartition(ResultPartitionID resultPartitionId) {
partitionIds.add(resultPartitionId);
}
public Set<ResultPartitionID> getPartitionIds() {
return partitionIds;
}
public int getTotalNumberOfPartitions() {
return totalNumberOfPartitions;
}
}
}
|
class TaskExecutorPartitionTrackerImpl extends AbstractPartitionTracker<JobID, TaskExecutorPartitionInfo> implements TaskExecutorPartitionTracker {
private final Map<IntermediateDataSetID, DataSetEntry> clusterPartitions = new HashMap<>();
private final ShuffleEnvironment<?, ?> shuffleEnvironment;
public TaskExecutorPartitionTrackerImpl(ShuffleEnvironment<?, ?> shuffleEnvironment) {
this.shuffleEnvironment = shuffleEnvironment;
}
@Override
public void startTrackingPartition(JobID producingJobId, TaskExecutorPartitionInfo partitionInfo) {
Preconditions.checkNotNull(producingJobId);
Preconditions.checkNotNull(partitionInfo);
startTrackingPartition(producingJobId, partitionInfo.getResultPartitionId(), partitionInfo);
}
@Override
public void stopTrackingAndReleaseJobPartitions(Collection<ResultPartitionID> partitionsToRelease) {
if (partitionsToRelease.isEmpty()) {
return;
}
stopTrackingPartitions(partitionsToRelease);
shuffleEnvironment.releasePartitionsLocally(partitionsToRelease);
}
@Override
public void stopTrackingAndReleaseJobPartitionsFor(JobID producingJobId) {
Collection<ResultPartitionID> partitionsForJob = CollectionUtil.project(
stopTrackingPartitionsFor(producingJobId),
PartitionTrackerEntry::getResultPartitionId);
shuffleEnvironment.releasePartitionsLocally(partitionsForJob);
}
@Override
public void stopTrackingAndReleaseClusterPartitions(Collection<IntermediateDataSetID> dataSetsToRelease) {
for (IntermediateDataSetID dataSetID : dataSetsToRelease) {
final DataSetEntry dataSetEntry = clusterPartitions.remove(dataSetID);
final Set<ResultPartitionID> partitionIds = dataSetEntry.getPartitionIds();
shuffleEnvironment.releasePartitionsLocally(partitionIds);
}
}
@Override
public void stopTrackingAndReleaseAllClusterPartitions() {
clusterPartitions.values().stream().map(DataSetEntry::getPartitionIds).forEach(shuffleEnvironment::releasePartitionsLocally);
clusterPartitions.clear();
}
@Override
public ClusterPartitionReport createClusterPartitionReport() {
List<ClusterPartitionReport.ClusterPartitionReportEntry> reportEntries = clusterPartitions.entrySet().stream().map(entry ->
new ClusterPartitionReport.ClusterPartitionReportEntry(
entry.getKey(),
entry.getValue().getPartitionIds(),
entry.getValue().getTotalNumberOfPartitions()))
.collect(Collectors.toList());
return new ClusterPartitionReport(reportEntries);
}
private static class DataSetEntry {
private final Set<ResultPartitionID> partitionIds = new HashSet<>();
private final int totalNumberOfPartitions;
private DataSetEntry(int totalNumberOfPartitions) {
this.totalNumberOfPartitions = totalNumberOfPartitions;
}
void addPartition(ResultPartitionID resultPartitionId) {
partitionIds.add(resultPartitionId);
}
public Set<ResultPartitionID> getPartitionIds() {
return partitionIds;
}
public int getTotalNumberOfPartitions() {
return totalNumberOfPartitions;
}
}
}
|
maybe a simple recursive algorithm is better
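For illustration, a recursive variant could look like this (a sketch only: `containsJoin` is a hypothetical helper, and it searches arbitrarily deep instead of peeking through a single LogicalProject as the code below does):
```
private boolean containsJoin(Plan plan) {
    if (plan instanceof LogicalJoin) {
        return true;
    }
    for (Plan child : plan.children()) {
        if (containsJoin(child)) {
            return true;
        }
    }
    return false;
}

private boolean isBottomJoin(LogicalJoin join) {
    // A join is a bottom join when no other join appears below it.
    return !containsJoin(join.left()) && !containsJoin(join.right());
}
```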
|
private boolean isBottomJoin(LogicalJoin join) {
if (join.left() instanceof LogicalProject) {
LogicalProject project = (LogicalProject) join.left();
if (project.child() instanceof LogicalJoin) {
return false;
}
}
if (join.right() instanceof LogicalProject) {
LogicalProject project = (LogicalProject) join.right();
if (project.child() instanceof LogicalJoin) {
return false;
}
}
if (join.left() instanceof LogicalJoin || join.right() instanceof LogicalJoin) {
return false;
}
return true;
}
|
}
|
private boolean isBottomJoin(LogicalJoin join) {
if (join.left() instanceof LogicalProject) {
LogicalProject project = (LogicalProject) join.left();
if (project.child() instanceof LogicalJoin) {
return false;
}
}
if (join.right() instanceof LogicalProject) {
LogicalProject project = (LogicalProject) join.right();
if (project.child() instanceof LogicalJoin) {
return false;
}
}
if (join.left() instanceof LogicalJoin || join.right() instanceof LogicalJoin) {
return false;
}
return true;
}
|
class JoinCommute extends OneExplorationRuleFactory {
private final SwapType swapType;
public JoinCommute() {
this.swapType = SwapType.ALL;
}
public JoinCommute(SwapType swapType) {
this.swapType = swapType;
}
enum SwapType {
BOTTOM_JOIN, ZIG_ZAG, ALL
}
@Override
public Rule build() {
return innerLogicalJoin(any(), any()).then(join -> {
if (!check(join)) {
return null;
}
boolean isBottomJoin = isBottomJoin(join);
if (swapType == SwapType.BOTTOM_JOIN && !isBottomJoin) {
return null;
}
LogicalJoin newJoin = new LogicalJoin(
join.getJoinType(),
join.getCondition(),
join.right(), join.left(),
join.getJoinReorderContext()
);
newJoin.getJoinReorderContext().setHasCommute(true);
if (swapType == SwapType.ZIG_ZAG && !isBottomJoin) {
newJoin.getJoinReorderContext().setHasCommuteZigZag(true);
}
return newJoin;
}).toRule(RuleType.LOGICAL_JOIN_COMMUTATIVE);
}
private boolean check(LogicalJoin join) {
if (join.getJoinReorderContext().hasCommute() || join.getJoinReorderContext().hasExchange()) {
return false;
}
return true;
}
}
|
class JoinCommute extends OneExplorationRuleFactory {
private final SwapType swapType;
private final boolean swapOuter;
public JoinCommute(boolean swapOuter) {
this.swapOuter = swapOuter;
this.swapType = SwapType.ALL;
}
public JoinCommute(boolean swapOuter, SwapType swapType) {
this.swapOuter = swapOuter;
this.swapType = swapType;
}
enum SwapType {
BOTTOM_JOIN, ZIG_ZAG, ALL
}
@Override
public Rule build() {
return innerLogicalJoin(any(), any()).then(join -> {
if (!check(join)) {
return null;
}
boolean isBottomJoin = isBottomJoin(join);
if (swapType == SwapType.BOTTOM_JOIN && !isBottomJoin) {
return null;
}
LogicalJoin newJoin = new LogicalJoin(
join.getJoinType(),
join.getCondition(),
join.right(), join.left(),
join.getJoinReorderContext()
);
newJoin.getJoinReorderContext().setHasCommute(true);
if (swapType == SwapType.ZIG_ZAG && !isBottomJoin) {
newJoin.getJoinReorderContext().setHasCommuteZigZag(true);
}
return newJoin;
}).toRule(RuleType.LOGICAL_JOIN_COMMUTATIVE);
}
private boolean check(LogicalJoin join) {
if (join.getJoinReorderContext().hasCommute() || join.getJoinReorderContext().hasExchange()) {
return false;
}
return true;
}
}
|
Just heard back: we do this when the swagger specification marks a field as `base64url`. I don't think it should be much of a performance issue; I wouldn't expect people to access this field an overwhelming number of times.
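For reference, base64url round-tripping with the JDK looks like this (standalone sketch; `java.util.Base64` is the standard API, the payload is made up):
```
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Standalone sketch of base64url encoding/decoding with the JDK.
public class Base64UrlSketch {
    public static void main(String[] args) {
        byte[] data = "policy-rules".getBytes(StandardCharsets.UTF_8);
        // URL-safe alphabet ('-' and '_' instead of '+' and '/'), no padding.
        String encoded = Base64.getUrlEncoder().withoutPadding().encodeToString(data);
        byte[] decoded = Base64.getUrlDecoder().decode(encoded);
        System.out.println(encoded + " -> " + new String(decoded, StandardCharsets.UTF_8));
    }
}
```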
|
public byte[] getData() {
if (this.data == null) {
return null;
}
return this.data.decodedBytes();
}
|
return this.data.decodedBytes();
|
public byte[] getData() {
return ByteExtensions.clone(this.data);
}
|
class KeyReleasePolicy {
/*
* Content type and version of key release policy.
*/
@JsonProperty(value = "contentType")
private String contentType;
/*
* Blob encoding the policy rules under which the key can be released.
*/
@JsonProperty(value = "data")
private Base64Url data;
/**
* Get the content type and version of key release policy.
*
* @return The content type and version of key release policy.
*/
public String getContentType() {
return this.contentType;
}
/**
* Set the content type and version of key release policy.
*
* <p>The service default is "application/json; charset=utf-8".</p>
*
* @param contentType The content type and version of key release policy to set.
*
* @return The updated {@link KeyReleasePolicy} object.
*/
public KeyReleasePolicy setContentType(String contentType) {
this.contentType = contentType;
return this;
}
/**
* Get a blob encoding the policy rules under which the key can be released.
*
* @return A blob encoding the policy rules under which the key can be released.
*/
/**
* Set a blob encoding the policy rules under which the key can be released.
*
* @param data A blob encoding the policy rules under which the key can be released.
*
* @return The updated {@link KeyReleasePolicy} object.
*/
public KeyReleasePolicy setData(byte[] data) {
if (data == null) {
this.data = null;
} else {
this.data = Base64Url.encode(CoreUtils.clone(data));
}
return this;
}
}
|
class KeyReleasePolicy {
/*
* Blob encoding the policy rules under which the key can be released.
*/
@JsonProperty(value = "data")
@JsonSerialize(using = Base64UrlJsonSerializer.class)
@JsonDeserialize(using = Base64UrlJsonDeserializer.class)
private byte[] data;
/*
* Content type and version of key release policy.
*/
@JsonProperty(value = "contentType")
private String contentType;
KeyReleasePolicy() {
}
/**
* Creates an instance of {@link KeyReleasePolicy}.
*
* @param data A blob encoding the policy rules under which the key can be released.
*/
public KeyReleasePolicy(byte[] data) {
Objects.requireNonNull(data, "'data' cannot be null.");
this.data = ByteExtensions.clone(data);
}
/**
* Get a blob encoding the policy rules under which the key can be released.
*
* @return A blob encoding the policy rules under which the key can be released.
*/
/**
* Get the content type and version of key release policy.
*
* @return The content type and version of key release policy.
*/
public String getContentType() {
return this.contentType;
}
/**
* Set the content type and version of key release policy.
*
* <p>The service default is "application/json; charset=utf-8".</p>
*
* @param contentType The content type and version of key release policy to set.
*
* @return The updated {@link KeyReleasePolicy} object.
*/
public KeyReleasePolicy setContentType(String contentType) {
this.contentType = contentType;
return this;
}
}
|
[SpotBugs-P1] Reliance on default encoding: Found a call to a method which will perform a byte to String (or String to byte) conversion, and will assume that the default platform encoding is suitable.
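The standard fix is to name the charset explicitly; a standalone sketch:
```
import java.nio.charset.StandardCharsets;

// Sketch of the SpotBugs fix: never rely on the platform default encoding.
public class CharsetSketch {
    public static void main(String[] args) {
        byte[] bytes = "delivery-tag".getBytes(StandardCharsets.UTF_8); // explicit charset
        String roundTrip = new String(bytes, StandardCharsets.UTF_8);   // explicit charset
        System.out.println(roundTrip);
    }
}
```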
|
private void runSendLoop() {
synchronized(this.pendingSendsSyncLock) {
if (this.isSendLoopRunning) {
return;
} else {
this.isSendLoopRunning = true;
}
}
TRACE_LOGGER.debug("Starting requestResponseLink {} internal sender send loop", this.parent.linkPath);
try {
while(this.sendLink != null && this.sendLink.getLocalState() == EndpointState.ACTIVE && this.sendLink.getRemoteState() == EndpointState.ACTIVE && this.availableCredit.get() > 0) {
String requestIdToBeSent;
synchronized(pendingSendsSyncLock) {
requestIdToBeSent = this.pendingRetrySends.poll();
if (requestIdToBeSent == null) {
requestIdToBeSent = this.pendingFreshSends.poll();
if (requestIdToBeSent == null) {
this.isSendLoopRunning = false;
TRACE_LOGGER.debug("RequestResponseLink {} internal sender send loop ending as there are no more requests enqueued.", this.parent.linkPath);
break;
}
}
}
RequestResponseWorkItem requestToBeSent = this.parent.pendingRequests.get(requestIdToBeSent);
if (requestToBeSent != null) {
Delivery delivery = this.sendLink.delivery(UUID.randomUUID().toString().getBytes(UTF_8));
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
TransactionContext transaction = requestToBeSent.getTransaction();
if (transaction != TransactionContext.NULL_TXN) {
TransactionalState transactionalState = new TransactionalState();
transactionalState.setTxnId(new Binary(transaction.getTransactionId().array()));
delivery.disposition(transactionalState);
}
Pair<byte[], Integer> encodedPair = null;
try {
encodedPair = Util.encodeMessageToOptimalSizeArray(requestToBeSent.getRequest(), this.maxMessageSize);
} catch (PayloadSizeExceededException exception) {
this.parent.exceptionallyCompleteRequest((String)requestToBeSent.getRequest().getMessageId(), new PayloadSizeExceededException(String.format("Size of the payload exceeded Maximum message size: %s kb", this.maxMessageSize / 1024), exception), false);
}
try {
int sentMsgSize = this.sendLink.send(encodedPair.getFirstItem(), 0, encodedPair.getSecondItem());
assert sentMsgSize == encodedPair.getSecondItem() : "Contract of the ProtonJ library for Sender.Send API changed";
delivery.settle();
this.availableCredit.decrementAndGet();
TRACE_LOGGER.debug("RequestResonseLink {} internal sender sent a request. available credit :{}", this.parent.linkPath, this.availableCredit.get());
} catch(Exception e) {
TRACE_LOGGER.error("RequestResonseLink {} failed to send request with request id:{}.", this.parent.linkPath, requestIdToBeSent, e);
this.parent.exceptionallyCompleteRequest(requestIdToBeSent, e, false);
}
} else {
TRACE_LOGGER.warn("Request with id:{} not found in the requestresponse link.", requestIdToBeSent);
}
}
} finally {
synchronized (this.pendingSendsSyncLock) {
if (this.isSendLoopRunning) {
this.isSendLoopRunning = false;
}
}
TRACE_LOGGER.debug("RequestResponseLink {} internal sender send loop stopped.", this.parent.linkPath);
}
}
|
Delivery delivery = this.sendLink.delivery(UUID.randomUUID().toString().getBytes(UTF_8));
|
private void runSendLoop() {
synchronized (this.pendingSendsSyncLock) {
if (this.isSendLoopRunning) {
return;
} else {
this.isSendLoopRunning = true;
}
}
TRACE_LOGGER.debug("Starting requestResponseLink {} internal sender send loop", this.parent.linkPath);
try {
while (this.sendLink != null && this.sendLink.getLocalState() == EndpointState.ACTIVE && this.sendLink.getRemoteState() == EndpointState.ACTIVE && this.availableCredit.get() > 0) {
String requestIdToBeSent;
synchronized (pendingSendsSyncLock) {
requestIdToBeSent = this.pendingRetrySends.poll();
if (requestIdToBeSent == null) {
requestIdToBeSent = this.pendingFreshSends.poll();
if (requestIdToBeSent == null) {
this.isSendLoopRunning = false;
TRACE_LOGGER.debug("RequestResponseLink {} internal sender send loop ending as there are no more requests enqueued.", this.parent.linkPath);
break;
}
}
}
RequestResponseWorkItem requestToBeSent = this.parent.pendingRequests.get(requestIdToBeSent);
if (requestToBeSent != null) {
Delivery delivery = this.sendLink.delivery(UUID.randomUUID().toString().getBytes(UTF_8));
delivery.setMessageFormat(DeliveryImpl.DEFAULT_MESSAGE_FORMAT);
TransactionContext transaction = requestToBeSent.getTransaction();
if (transaction != TransactionContext.NULL_TXN) {
TransactionalState transactionalState = new TransactionalState();
transactionalState.setTxnId(new Binary(transaction.getTransactionId().array()));
delivery.disposition(transactionalState);
}
Pair<byte[], Integer> encodedPair = null;
try {
encodedPair = Util.encodeMessageToOptimalSizeArray(requestToBeSent.getRequest(), this.maxMessageSize);
} catch (PayloadSizeExceededException exception) {
this.parent.exceptionallyCompleteRequest((String)requestToBeSent.getRequest().getMessageId(), new PayloadSizeExceededException(String.format("Size of the payload exceeded Maximum message size: %s kb", this.maxMessageSize / 1024), exception), false);
}
try {
int sentMsgSize = this.sendLink.send(encodedPair.getFirstItem(), 0, encodedPair.getSecondItem());
assert sentMsgSize == encodedPair.getSecondItem() : "Contract of the ProtonJ library for Sender.Send API changed";
delivery.settle();
this.availableCredit.decrementAndGet();
TRACE_LOGGER.debug("RequestResonseLink {} internal sender sent a request. available credit :{}", this.parent.linkPath, this.availableCredit.get());
} catch(Exception e) {
TRACE_LOGGER.error("RequestResonseLink {} failed to send request with request id:{}.", this.parent.linkPath, requestIdToBeSent, e);
this.parent.exceptionallyCompleteRequest(requestIdToBeSent, e, false);
}
} else {
TRACE_LOGGER.warn("Request with id:{} not found in the requestresponse link.", requestIdToBeSent);
}
}
} finally {
synchronized (this.pendingSendsSyncLock) {
if (this.isSendLoopRunning) {
this.isSendLoopRunning = false;
}
}
TRACE_LOGGER.debug("RequestResponseLink {} internal sender send loop stopped.", this.parent.linkPath);
}
}
|
class InternalSender extends ClientEntity implements IAmqpSender {
private Sender sendLink;
private Receiver matchingReceiveLink;
private RequestResponseLink parent;
private CompletableFuture<Void> openFuture;
private CompletableFuture<Void> closeFuture;
private AtomicInteger availableCredit;
private LinkedList<String> pendingFreshSends;
private LinkedList<String> pendingRetrySends;
private Object pendingSendsSyncLock;
private boolean isSendLoopRunning;
private int maxMessageSize;
private int linkGeneration;
protected InternalSender(String clientId, RequestResponseLink parent, InternalSender senderToBeCopied) {
super(clientId);
this.parent = parent;
this.linkGeneration = parent.internalLinkGeneration;
this.availableCredit = new AtomicInteger(0);
this.pendingSendsSyncLock = new Object();
this.isSendLoopRunning = false;
this.openFuture = new CompletableFuture<Void>();
this.closeFuture = new CompletableFuture<Void>();
if (senderToBeCopied == null) {
this.pendingFreshSends = new LinkedList<>();
this.pendingRetrySends = new LinkedList<>();
} else {
this.pendingFreshSends = senderToBeCopied.pendingFreshSends;
this.pendingRetrySends = senderToBeCopied.pendingRetrySends;
}
}
@Override
protected CompletableFuture<Void> onClose() {
this.closeInternals(true);
return this.closeFuture;
}
void closeInternals(boolean waitForCloseCompletion) {
if (!this.getIsClosed()) {
if (this.sendLink != null && this.sendLink.getLocalState() != EndpointState.CLOSED) {
try {
this.parent.underlyingFactory.scheduleOnReactorThread(new DispatchHandler() {
@Override
public void onEvent() {
if (InternalSender.this.sendLink != null && InternalSender.this.sendLink.getLocalState() != EndpointState.CLOSED) {
TRACE_LOGGER.debug("Closing internal send link of requestresponselink to {}", RequestResponseLink.this.linkPath);
InternalSender.this.sendLink.close();
InternalSender.this.parent.underlyingFactory.deregisterForConnectionError(InternalSender.this.sendLink);
if (waitForCloseCompletion) {
RequestResponseLink.scheduleLinkCloseTimeout(InternalSender.this.closeFuture, InternalSender.this.parent.underlyingFactory.getOperationTimeout(), InternalSender.this.sendLink.getName());
} else {
AsyncUtil.completeFuture(InternalSender.this.closeFuture, null);
}
}
}
});
} catch (IOException e) {
AsyncUtil.completeFutureExceptionally(this.closeFuture, e);
}
} else {
AsyncUtil.completeFuture(this.closeFuture, null);
}
}
}
@Override
public void onOpenComplete(Exception completionException) {
if (completionException == null) {
TRACE_LOGGER.debug("Opened internal send link of requestresponselink to {}", parent.linkPath);
this.maxMessageSize = Util.getMaxMessageSizeFromLink(this.sendLink);
AsyncUtil.completeFuture(this.openFuture, null);
this.runSendLoop();
} else {
TRACE_LOGGER.error("Opening internal send link '{}' of requestresponselink to {} failed.", this.sendLink.getName(), this.parent.linkPath, completionException);
this.setClosed();
AsyncUtil.completeFuture(this.closeFuture, null);
AsyncUtil.completeFutureExceptionally(this.openFuture, completionException);
}
}
@Override
public void onError(Exception exception) {
if (!this.openFuture.isDone()) {
this.onOpenComplete(exception);
}
if (this.getIsClosingOrClosed()) {
if (!this.closeFuture.isDone()) {
TRACE_LOGGER.error("Closing internal send link '{}' of requestresponselink to {} failed.", this.sendLink.getName(), this.parent.linkPath, exception);
AsyncUtil.completeFutureExceptionally(this.closeFuture, exception);
}
} else {
TRACE_LOGGER.warn("Internal send link '{}' of requestresponselink to '{}' encountered error.", this.sendLink.getName(), this.parent.linkPath, exception);
this.parent.underlyingFactory.deregisterForConnectionError(this.sendLink);
this.matchingReceiveLink.close();
this.parent.underlyingFactory.deregisterForConnectionError(this.matchingReceiveLink);
this.parent.onInnerLinksClosed(this.linkGeneration, exception);
}
}
@Override
public void onClose(ErrorCondition condition) {
if (condition == null || condition.getCondition() == null) {
if (!this.closeFuture.isDone()) {
TRACE_LOGGER.info("Closed internal send link of requestresponselink to {}", this.parent.linkPath);
AsyncUtil.completeFuture(this.closeFuture, null);
}
} else {
Exception exception = ExceptionUtil.toException(condition);
this.onError(exception);
}
}
public void sendRequest(String requestId, boolean isRetry) {
synchronized(this.pendingSendsSyncLock) {
if (isRetry) {
this.pendingRetrySends.add(requestId);
} else {
this.pendingFreshSends.add(requestId);
}
if (this.isSendLoopRunning) {
return;
}
}
try {
this.parent.underlyingFactory.scheduleOnReactorThread(new DispatchHandler() {
@Override
public void onEvent() {
InternalSender.this.runSendLoop();
}
});
} catch (IOException e) {
this.parent.exceptionallyCompleteRequest(requestId, e, true);
}
}
public void removeEnqueuedRequest(String requestId, boolean isRetry) {
synchronized(this.pendingSendsSyncLock) {
if (isRetry) {
this.pendingRetrySends.remove(requestId);
} else {
this.pendingFreshSends.remove(requestId);
}
}
}
@Override
public void onFlow(int creditIssued) {
TRACE_LOGGER.debug("RequestResonseLink {} internal sender received credit :{}", this.parent.linkPath, creditIssued);
this.availableCredit.addAndGet(creditIssued);
TRACE_LOGGER.debug("RequestResonseLink {} internal sender available credit :{}", this.parent.linkPath, this.availableCredit.get());
this.runSendLoop();
}
@Override
public void onSendComplete(Delivery delivery) {
}
public void setLinks(Sender sendLink, Receiver receiveLink) {
this.sendLink = sendLink;
this.matchingReceiveLink = receiveLink;
this.availableCredit = new AtomicInteger(0);
}
}
|
class InternalSender extends ClientEntity implements IAmqpSender {
private Sender sendLink;
private Receiver matchingReceiveLink;
private RequestResponseLink parent;
private CompletableFuture<Void> openFuture;
private CompletableFuture<Void> closeFuture;
private AtomicInteger availableCredit;
private LinkedList<String> pendingFreshSends;
private LinkedList<String> pendingRetrySends;
private Object pendingSendsSyncLock;
private boolean isSendLoopRunning;
private int maxMessageSize;
private int linkGeneration;
protected InternalSender(String clientId, RequestResponseLink parent, InternalSender senderToBeCopied) {
super(clientId);
this.parent = parent;
this.linkGeneration = parent.internalLinkGeneration;
this.availableCredit = new AtomicInteger(0);
this.pendingSendsSyncLock = new Object();
this.isSendLoopRunning = false;
this.openFuture = new CompletableFuture<Void>();
this.closeFuture = new CompletableFuture<Void>();
if (senderToBeCopied == null) {
this.pendingFreshSends = new LinkedList<>();
this.pendingRetrySends = new LinkedList<>();
} else {
this.pendingFreshSends = senderToBeCopied.pendingFreshSends;
this.pendingRetrySends = senderToBeCopied.pendingRetrySends;
}
}
@Override
protected CompletableFuture<Void> onClose() {
this.closeInternals(true);
return this.closeFuture;
}
void closeInternals(boolean waitForCloseCompletion) {
if (!this.getIsClosed()) {
if (this.sendLink != null && this.sendLink.getLocalState() != EndpointState.CLOSED) {
try {
this.parent.underlyingFactory.scheduleOnReactorThread(new DispatchHandler() {
@Override
public void onEvent() {
if (InternalSender.this.sendLink != null && InternalSender.this.sendLink.getLocalState() != EndpointState.CLOSED) {
TRACE_LOGGER.debug("Closing internal send link of requestresponselink to {}", RequestResponseLink.this.linkPath);
InternalSender.this.sendLink.close();
InternalSender.this.parent.underlyingFactory.deregisterForConnectionError(InternalSender.this.sendLink);
if (waitForCloseCompletion) {
RequestResponseLink.scheduleLinkCloseTimeout(InternalSender.this.closeFuture, InternalSender.this.parent.underlyingFactory.getOperationTimeout(), InternalSender.this.sendLink.getName());
} else {
AsyncUtil.completeFuture(InternalSender.this.closeFuture, null);
}
}
}
});
} catch (IOException e) {
AsyncUtil.completeFutureExceptionally(this.closeFuture, e);
}
} else {
AsyncUtil.completeFuture(this.closeFuture, null);
}
}
}
@Override
public void onOpenComplete(Exception completionException) {
if (completionException == null) {
TRACE_LOGGER.debug("Opened internal send link of requestresponselink to {}", parent.linkPath);
this.maxMessageSize = Util.getMaxMessageSizeFromLink(this.sendLink);
AsyncUtil.completeFuture(this.openFuture, null);
this.runSendLoop();
} else {
TRACE_LOGGER.error("Opening internal send link '{}' of requestresponselink to {} failed.", this.sendLink.getName(), this.parent.linkPath, completionException);
this.setClosed();
AsyncUtil.completeFuture(this.closeFuture, null);
AsyncUtil.completeFutureExceptionally(this.openFuture, completionException);
}
}
@Override
public void onError(Exception exception) {
if (!this.openFuture.isDone()) {
this.onOpenComplete(exception);
}
if (this.getIsClosingOrClosed()) {
if (!this.closeFuture.isDone()) {
TRACE_LOGGER.error("Closing internal send link '{}' of requestresponselink to {} failed.", this.sendLink.getName(), this.parent.linkPath, exception);
AsyncUtil.completeFutureExceptionally(this.closeFuture, exception);
}
} else {
TRACE_LOGGER.warn("Internal send link '{}' of requestresponselink to '{}' encountered error.", this.sendLink.getName(), this.parent.linkPath, exception);
this.parent.underlyingFactory.deregisterForConnectionError(this.sendLink);
this.matchingReceiveLink.close();
this.parent.underlyingFactory.deregisterForConnectionError(this.matchingReceiveLink);
this.parent.onInnerLinksClosed(this.linkGeneration, exception);
}
}
@Override
public void onClose(ErrorCondition condition) {
if (condition == null || condition.getCondition() == null) {
if (!this.closeFuture.isDone()) {
TRACE_LOGGER.info("Closed internal send link of requestresponselink to {}", this.parent.linkPath);
AsyncUtil.completeFuture(this.closeFuture, null);
}
} else {
Exception exception = ExceptionUtil.toException(condition);
this.onError(exception);
}
}
public void sendRequest(String requestId, boolean isRetry) {
synchronized (this.pendingSendsSyncLock) {
if (isRetry) {
this.pendingRetrySends.add(requestId);
} else {
this.pendingFreshSends.add(requestId);
}
if (this.isSendLoopRunning) {
return;
}
}
try {
this.parent.underlyingFactory.scheduleOnReactorThread(new DispatchHandler() {
@Override
public void onEvent() {
InternalSender.this.runSendLoop();
}
});
} catch (IOException e) {
this.parent.exceptionallyCompleteRequest(requestId, e, true);
}
}
public void removeEnqueuedRequest(String requestId, boolean isRetry) {
synchronized (this.pendingSendsSyncLock) {
if (isRetry) {
this.pendingRetrySends.remove(requestId);
} else {
this.pendingFreshSends.remove(requestId);
}
}
}
@Override
public void onFlow(int creditIssued) {
TRACE_LOGGER.debug("RequestResonseLink {} internal sender received credit :{}", this.parent.linkPath, creditIssued);
this.availableCredit.addAndGet(creditIssued);
TRACE_LOGGER.debug("RequestResonseLink {} internal sender available credit :{}", this.parent.linkPath, this.availableCredit.get());
this.runSendLoop();
}
@Override
public void onSendComplete(Delivery delivery) {
}
public void setLinks(Sender sendLink, Receiver receiveLink) {
this.sendLink = sendLink;
this.matchingReceiveLink = receiveLink;
this.availableCredit = new AtomicInteger(0);
}
}
|
maybe we need to check that the left-child and right-child slots appear on different sides of the equation
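A standalone sketch of what that side check could mean (slot identity modeled as plain strings here; none of this is the actual planner API):
```
import java.util.Set;

// An equi-condition is only usable as a join conjunct if one side of the
// equality references slots from the left child and the other side references
// slots from the right child (or vice versa).
public class SlotSideCheck {
    static boolean onDifferentSides(Set<String> leftChildSlots, Set<String> rightChildSlots,
                                    Set<String> lhsSlots, Set<String> rhsSlots) {
        return (leftChildSlots.containsAll(lhsSlots) && rightChildSlots.containsAll(rhsSlots))
                || (leftChildSlots.containsAll(rhsSlots) && rightChildSlots.containsAll(lhsSlots));
    }

    public static void main(String[] args) {
        System.out.println(onDifferentSides(
                Set.of("t1.a"), Set.of("t2.b"),
                Set.of("t1.a"), Set.of("t2.b"))); // true: t1.a = t2.b spans both children
    }
}
```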
|
public Void visitLogicalFilter(LogicalFilter<Plan> filter, Void context) {
Plan child = filter.child();
if (child instanceof LogicalJoin) {
conjuncts.addAll(ExpressionUtils.extractConjunct(filter.getPredicates()));
}
child.accept(this, context);
return null;
}
|
conjuncts.addAll(ExpressionUtils.extractConjunct(filter.getPredicates()));
|
public Void visitLogicalFilter(LogicalFilter<Plan> filter, Void context) {
Plan child = filter.child();
if (child instanceof LogicalJoin) {
conjuncts.addAll(ExpressionUtils.extractConjunct(filter.getPredicates()));
}
child.accept(this, context);
return null;
}
|
class PlanCollector extends PlanVisitor<Void, Void> {
public final List<Plan> joinInputs = new ArrayList<>();
public final List<Expression> conjuncts = new ArrayList<>();
@Override
public Void visit(Plan plan, Void context) {
for (Plan child : plan.children()) {
child.accept(this, context);
}
return null;
}
@Override
public Void visitLogicalJoin(LogicalJoin<Plan, Plan> join, Void context) {
if (join.getJoinType() != JoinType.CROSS_JOIN && join.getJoinType() != JoinType.INNER_JOIN) {
return null;
}
join.left().accept(this, context);
join.right().accept(this, context);
join.getCondition().ifPresent(cond -> conjuncts.addAll(ExpressionUtils.extractConjunct(cond)));
if (!(join.left() instanceof LogicalJoin)) {
joinInputs.add(join.left());
}
if (!(join.right() instanceof LogicalJoin)) {
joinInputs.add(join.right());
}
return null;
}
}
|
class PlanCollector extends PlanVisitor<Void, Void> {
public final List<Plan> joinInputs = new ArrayList<>();
public final List<Expression> conjuncts = new ArrayList<>();
@Override
public Void visit(Plan plan, Void context) {
for (Plan child : plan.children()) {
child.accept(this, context);
}
return null;
}
@Override
public Void visitLogicalJoin(LogicalJoin<Plan, Plan> join, Void context) {
if (join.getJoinType() != JoinType.CROSS_JOIN && join.getJoinType() != JoinType.INNER_JOIN) {
return null;
}
join.left().accept(this, context);
join.right().accept(this, context);
join.getCondition().ifPresent(cond -> conjuncts.addAll(ExpressionUtils.extractConjunct(cond)));
if (!(join.left() instanceof LogicalJoin)) {
joinInputs.add(join.left());
}
if (!(join.right() instanceof LogicalJoin)) {
joinInputs.add(join.right());
}
return null;
}
}
|
and then I will do the check at this line.
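That is, with a single nullable replacement value the per-element branch collapses to one null check; a standalone sketch of the simplified logic:
```
// Sketch: with one (possibly null) replacement, each element needs one check.
public class NullReplacementSketch {
    static String nextElement(String element, String nullReplacement) {
        return (element == null && nullReplacement != null) ? nullReplacement : element;
    }

    public static void main(String[] args) {
        System.out.println(nextElement(null, "N/A")); // N/A
        System.out.println(nextElement("a", "N/A"));  // a
        System.out.println(nextElement(null, null));  // null
    }
}
```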
|
public BinaryStringData next() {
if (!hasNext()) {
return null;
}
final Object str = elementGetter.getElementOrNull(arrayData, currentPos);
currentPos++;
if (str == null && nullReplacement.length != 0 && nullReplacement[0] != null) {
return (BinaryStringData) nullReplacement[0];
}
return (BinaryStringData) str;
}
|
if (str == null && nullReplacement.length != 0 && nullReplacement[0] != null) {
|
public BinaryStringData next() {
if (!hasNext()) {
return null;
}
final Object str = elementGetter.getElementOrNull(arrayData, currentPos);
currentPos++;
if (str == null && nullReplacement != null) {
return nullReplacement;
}
return (BinaryStringData) str;
}
|
class ArrayIterator implements Iterator<BinaryStringData> {
private final int size;
private final ArrayData.ElementGetter elementGetter;
private final ArrayData arrayData;
private final StringData[] nullReplacement;
private int currentPos;
public ArrayIterator(
ArrayData.ElementGetter elementGetter,
ArrayData arrayData,
StringData[] nullReplacement) {
this.size = arrayData.size();
this.elementGetter = elementGetter;
this.arrayData = arrayData;
this.nullReplacement = nullReplacement;
}
@Override
public boolean hasNext() {
return currentPos < size;
}
}
|
class ArrayIterator implements Iterator<BinaryStringData> {
private final int size;
private final ArrayData.ElementGetter elementGetter;
private final ArrayData arrayData;
private final BinaryStringData nullReplacement;
private int currentPos;
public ArrayIterator(
ArrayData.ElementGetter elementGetter,
ArrayData arrayData,
BinaryStringData nullReplacement) {
this.size = arrayData.size();
this.elementGetter = elementGetter;
this.arrayData = arrayData;
this.nullReplacement = nullReplacement;
}
@Override
public boolean hasNext() {
return currentPos < size;
}
}
|
I'll add `e` to the `warnf`.
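Note that jboss-logging also offers Throwable-first `warnf` overloads; passing the exception as a trailing argument leaves it unused by the `%s` pattern, whereas the Throwable-first form attaches the stack trace. A sketch:
```
import org.jboss.logging.Logger;

// Sketch of the Throwable-first overload of org.jboss.logging.Logger.warnf.
public class WarnfSketch {
    private static final Logger log = Logger.getLogger(WarnfSketch.class);

    static void report(Exception e, Object source) {
        log.warnf(e, "Failed to parse manifest for %s", source); // stack trace logged
    }
}
```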
|
protected Manifest readManifest() {
final ClassPathResource mf = getResource("META-INF/MANIFEST.MF");
if (mf != null) {
try {
return new Manifest(new ByteArrayInputStream(mf.getData()));
} catch (IOException e) {
log.warnf("Failed to parse manifest for %s", toString());
}
}
return null;
}
|
log.warnf("Failed to parse manifest for %s", toString());
|
protected Manifest readManifest() {
final ClassPathResource mf = getResource("META-INF/MANIFEST.MF");
if (mf != null) {
try {
return new Manifest(new ByteArrayInputStream(mf.getData()));
} catch (IOException e) {
log.warnf("Failed to parse manifest for %s", toString(), e);
}
}
return null;
}
|
class AbstractClassPathElement implements ClassPathElement {
private static final Logger log = Logger.getLogger(AbstractClassPathElement.class);
protected volatile Manifest manifest;
protected volatile boolean manifestInitialized = false;
@Override
public Manifest getManifest() {
if (manifestInitialized) {
return manifest;
}
synchronized (this) {
if (manifestInitialized) {
return manifest;
}
manifest = readManifest();
manifestInitialized = true;
}
return manifest;
}
}
|
class AbstractClassPathElement implements ClassPathElement {
private static final Logger log = Logger.getLogger(AbstractClassPathElement.class);
protected volatile Manifest manifest;
protected volatile boolean manifestInitialized = false;
@Override
public Manifest getManifest() {
if (manifestInitialized) {
return manifest;
}
synchronized (this) {
if (manifestInitialized) {
return manifest;
}
manifest = readManifest();
manifestInitialized = true;
}
return manifest;
}
}
|
```suggestion
throw new AlterCancelException("Database " + dbId + " does not exist");
```
|
protected void runWaitingTxnJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.WAITING_TXN, jobState);
if (optimizeClause == null) {
throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
}
List<String> tmpPartitionNames;
List<String> partitionNames = Lists.newArrayList();
List<String> tableColumnNames = Lists.newArrayList();
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Databasee " + dbId + " does not exist");
}
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
dbName = db.getFullName();
OlapTable tbl = checkAndGetTable(db, tableId);
if (getTmpPartitionIds().stream().anyMatch(id -> tbl.getPartition(id) == null)) {
throw new AlterCancelException("partitions changed during insert");
}
tmpPartitionNames = getTmpPartitionIds().stream()
.map(partitionId -> tbl.getPartition(partitionId).getName())
.collect(Collectors.toList());
optimizeClause.getSourcePartitionIds().stream()
.map(partitionId -> tbl.getPartition(partitionId)).forEach(
partition -> {
partitionNames.add(partition.getName());
}
);
tableColumnNames = tbl.getBaseSchema().stream().filter(column -> !column.isGeneratedColumn())
.map(col -> ParseUtil.backquote(col.getName())).collect(Collectors.toList());
} finally {
locker.unLockDatabase(db, LockType.READ);
}
for (int i = 0; i < tmpPartitionNames.size(); ++i) {
String tmpPartitionName = tmpPartitionNames.get(i);
String partitionName = partitionNames.get(i);
String rewriteSql = "insert into " + dbName + "." + tableName + " TEMPORARY PARTITION ("
+ tmpPartitionName + ") select " + Joiner.on(", ").join(tableColumnNames)
+ " from " + dbName + "." + tableName + " partition (" + partitionName + ")";
String taskName = getName() + "_" + tmpPartitionName;
OptimizeTask rewriteTask = TaskBuilder.buildOptimizeTask(taskName, properties, rewriteSql, dbName);
rewriteTask.setPartitionName(partitionName);
rewriteTask.setTempPartitionName(tmpPartitionName);
rewriteTasks.add(rewriteTask);
}
this.jobState = JobState.RUNNING;
span.addEvent("setRunning");
LOG.info("transfer optimize job {} state to {}", jobId, this.jobState);
}
|
throw new AlterCancelException("Databasee " + dbId + " does not exist");
|
protected void runWaitingTxnJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.WAITING_TXN, jobState);
if (optimizeClause == null) {
throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
}
List<String> tmpPartitionNames;
List<String> partitionNames = Lists.newArrayList();
List<String> tableColumnNames = Lists.newArrayList();
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Database " + dbId + " does not exist");
}
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
dbName = db.getFullName();
OlapTable tbl = checkAndGetTable(db, tableId);
if (getTmpPartitionIds().stream().anyMatch(id -> tbl.getPartition(id) == null)) {
throw new AlterCancelException("partitions changed during insert");
}
tmpPartitionNames = getTmpPartitionIds().stream()
.map(partitionId -> tbl.getPartition(partitionId).getName())
.collect(Collectors.toList());
optimizeClause.getSourcePartitionIds().stream()
.map(partitionId -> tbl.getPartition(partitionId)).forEach(
partition -> {
partitionNames.add(partition.getName());
}
);
tableColumnNames = tbl.getBaseSchema().stream().filter(column -> !column.isGeneratedColumn())
.map(col -> ParseUtil.backquote(col.getName())).collect(Collectors.toList());
} finally {
locker.unLockDatabase(db, LockType.READ);
}
for (int i = 0; i < tmpPartitionNames.size(); ++i) {
String tmpPartitionName = tmpPartitionNames.get(i);
String partitionName = partitionNames.get(i);
String rewriteSql = "insert into " + dbName + "." + tableName + " TEMPORARY PARTITION ("
+ tmpPartitionName + ") select " + Joiner.on(", ").join(tableColumnNames)
+ " from " + dbName + "." + tableName + " partition (" + partitionName + ")";
String taskName = getName() + "_" + tmpPartitionName;
OptimizeTask rewriteTask = TaskBuilder.buildOptimizeTask(taskName, properties, rewriteSql, dbName);
rewriteTask.setPartitionName(partitionName);
rewriteTask.setTempPartitionName(tmpPartitionName);
rewriteTasks.add(rewriteTask);
}
this.jobState = JobState.RUNNING;
span.addEvent("setRunning");
LOG.info("transfer optimize job {} state to {}", jobId, this.jobState);
}
|
class OnlineOptimizeJobV2 extends AlterJobV2 implements GsonPostProcessable {
private static final Logger LOG = LogManager.getLogger(OnlineOptimizeJobV2.class);
@SerializedName(value = "watershedTxnId")
protected long watershedTxnId = -1;
private final String postfix;
private static final ExecutorService EXECUTOR = Executors.newCachedThreadPool();
private Future<Constants.TaskRunState> future = null;
@SerializedName(value = "tmpPartitionIds")
private List<Long> tmpPartitionIds = Lists.newArrayList();
private OptimizeClause optimizeClause;
private String dbName = "";
private Map<String, String> properties = Maps.newHashMap();
@SerializedName(value = "rewriteTasks")
private List<OptimizeTask> rewriteTasks = Lists.newArrayList();
private int progress = 0;
@SerializedName(value = "sourcePartitionNames")
private List<String> sourcePartitionNames = Lists.newArrayList();
@SerializedName(value = "tmpPartitionNames")
private List<String> tmpPartitionNames = Lists.newArrayList();
@SerializedName(value = "allPartitionOptimized")
private Boolean allPartitionOptimized = false;
@SerializedName(value = "distributionInfo")
private DistributionInfo distributionInfo;
@SerializedName(value = "optimizeOperation")
private String optimizeOperation = "";
public OnlineOptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs,
OptimizeClause optimizeClause) {
this(jobId, dbId, tableId, tableName, timeoutMs);
this.optimizeClause = optimizeClause;
}
public OnlineOptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs) {
super(jobId, JobType.OPTIMIZE, dbId, tableId, tableName, timeoutMs);
this.postfix = "_" + jobId;
}
public List<Long> getTmpPartitionIds() {
return tmpPartitionIds;
}
public void setTmpPartitionIds(List<Long> tmpPartitionIds) {
this.tmpPartitionIds = tmpPartitionIds;
}
public String getName() {
return "optimize-" + this.postfix;
}
public Map<String, String> getProperties() {
return properties;
}
public List<OptimizeTask> getOptimizeTasks() {
return rewriteTasks;
}
private OlapTable checkAndGetTable(Database db, long tableId) throws AlterCancelException {
Table table = db.getTable(tableId);
if (table == null) {
throw new AlterCancelException("table: " + tableId + " does not exist in database: " + db.getFullName());
}
Preconditions.checkState(table instanceof OlapTable);
return (OlapTable) table;
}
/**
* runPendingJob():
* 1. Create all temp partitions and wait for them to finish.
* 2. Get a new transaction id, then set job's state to WAITING_TXN
*/
@Override
protected void runPendingJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.PENDING, jobState);
LOG.info("begin to send create temp partitions. job: {}", jobId);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Database " + dbId + " does not exist");
}
if (!checkTableStable(db)) {
return;
}
if (optimizeClause == null) {
throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
}
if (optimizeClause.isTableOptimize()) {
allPartitionOptimized = true;
}
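// Pre-allocate one temp partition id per source partition. The ids are serialized with the
// job (@SerializedName "tmpPartitionIds"), so a replayed or cancelled job can still find and
// drop the temp partitions it created.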
for (int i = 0; i < optimizeClause.getSourcePartitionIds().size(); ++i) {
tmpPartitionIds.add(GlobalStateMgr.getCurrentState().getNextId());
}
long createPartitionStartTimestamp = System.currentTimeMillis();
OlapTable targetTable;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
targetTable = checkAndGetTable(db, tableId);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
try {
PartitionUtils.createAndAddTempPartitionsForTable(db, targetTable, postfix,
optimizeClause.getSourcePartitionIds(), getTmpPartitionIds(), optimizeClause.getDistributionDesc(),
warehouseId);
LOG.debug("create temp partitions {} success. job: {}", getTmpPartitionIds(), jobId);
} catch (Exception e) {
LOG.warn("create temp partitions failed", e);
throw new AlterCancelException("create temp partitions failed " + e);
}
long createPartitionElapse = System.currentTimeMillis() - createPartitionStartTimestamp;
this.jobState = JobState.WAITING_TXN;
this.optimizeOperation = optimizeClause.toString();
span.setAttribute("createPartitionElapse", createPartitionElapse);
span.setAttribute("watershedTxnId", this.watershedTxnId);
span.addEvent("setWaitingTxn");
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
LOG.info("transfer optimize job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId);
}
/**
* runWaitingTxnJob():
* 1. Wait for the transactions before the watershedTxnId to finish.
* 2. If all previous transactions finished, start inserting data into the temp partitions.
* 3. Change job state to RUNNING.
*/
private void enableDoubleWritePartition(Database db, OlapTable tbl, String sourcePartitionName, String tmpPartitionName) {
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
Preconditions.checkState(tbl.getState() == OlapTableState.SCHEMA_CHANGE);
tbl.addDoubleWritePartition(sourcePartitionName, tmpPartitionName);
LOG.info("job {} add double write partition {} to {}", jobId, tmpPartitionName, sourcePartitionName);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
private void disableDoubleWritePartition(Database db, OlapTable tbl) {
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
Preconditions.checkState(tbl.getState() == OlapTableState.SCHEMA_CHANGE);
tbl.clearDoubleWritePartition();
LOG.info("job {} clear double write partitions", jobId);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
/**
* runRunningJob()
* 1. Wait for the insert tasks to finish.
* 2. Replace partitions with temp partitions.
* 3. Set the job's state to FINISHED.
*/
@Override
protected void runRunningJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.RUNNING, jobState);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Databasee " + dbId + " does not exist");
}
OlapTable tbl;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
tbl = checkAndGetTable(db, tableId);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
int progress = 0;
for (OptimizeTask rewriteTask : rewriteTasks) {
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED
|| rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.SUCCESS) {
progress += 100 / rewriteTasks.size();
continue;
}
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.PENDING) {
enableDoubleWritePartition(db, tbl, rewriteTask.getPartitionName(), rewriteTask.getTempPartitionName());
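// Take a watershed transaction id: every load txn started before this point must finish
// (checked via isPreviousLoadFinished below) before the rewrite reads the source partition,
// so rows written while double-write was being enabled are not lost.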
this.watershedTxnId = GlobalStateMgr.getCurrentState()
.getGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId();
rewriteTask.setOptimizeTaskState(Constants.TaskRunState.RUNNING);
}
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.RUNNING) {
try {
if (!isPreviousLoadFinished()) {
LOG.info("wait transactions before {} to be finished, optimize job: {}", watershedTxnId, jobId);
return;
}
} catch (AnalysisException e) {
throw new AlterCancelException(e.getMessage());
}
if (this.future != null) {
if (this.future.isDone()) {
try {
rewriteTask.setOptimizeTaskState(future.get());
} catch (InterruptedException | ExecutionException e) {
LOG.warn("get rewrite task result failed", e);
rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED);
}
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED) {
this.allPartitionOptimized = false;
}
this.future = null;
} else {
LOG.info("wait rewrite task {} to be finished, optimize job: {}", rewriteTask.getName(), jobId);
return;
}
} else {
LOG.info("previous transactions are all finished, begin to optimize task {}. job: {}",
rewriteTask.toString(), jobId);
Callable<Constants.TaskRunState> task = () -> {
try {
executeSql(rewriteTask.getDefinition());
LOG.info("finish rewrite task: {}", rewriteTask.getName());
return Constants.TaskRunState.SUCCESS;
} catch (Exception e) {
LOG.warn("create rewrite task failed", e);
return Constants.TaskRunState.FAILED;
}
};
this.future = EXECUTOR.submit(task);
return;
}
}
locker.lockDatabase(db, LockType.WRITE);
try {
onTaskFinished(db, tbl, rewriteTask);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
locker.lockDatabase(db, LockType.WRITE);
try {
onFinished(db, tbl);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
LOG.debug("all insert overwrite tasks finished, optimize job: {}", jobId);
this.progress = 100;
this.jobState = JobState.FINISHED;
this.finishedTimeMs = System.currentTimeMillis();
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
LOG.info("optimize job finished: {}", jobId);
this.span.end();
}
@Override
protected void runFinishedRewritingJob() {
}
private void onFinished(Database db, OlapTable targetTable) throws AlterCancelException {
try {
if (allPartitionOptimized && optimizeClause.getDistributionDesc() != null) {
this.distributionInfo = optimizeClause.getDistributionDesc().toDistributionInfo(targetTable.getColumns());
targetTable.setDefaultDistributionInfo(distributionInfo);
}
targetTable.setState(OlapTableState.NORMAL);
} catch (Exception e) {
LOG.warn("optimize table failed dbId:{}, tableId:{} exception: {}", dbId, tableId, e);
throw new AlterCancelException("optimize table failed " + e.getMessage());
}
}
private void onTaskFinished(Database db, OlapTable targetTable, OptimizeTask rewriteTask) throws AlterCancelException {
try {
String sourcePartitionName = rewriteTask.getPartitionName();
String tmpPartitionName = rewriteTask.getTempPartitionName();
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED) {
LOG.info("optimize job {} rewrite task {} state {} failed or partition {}",
jobId, rewriteTask.getName(), rewriteTask.getOptimizeTaskState(), sourcePartitionName);
targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true);
throw new AlterCancelException(sourcePartitionName + " rewrite task execute failed");
}
Set<Tablet> sourceTablets = Sets.newHashSet();
Partition partition = targetTable.getPartition(sourcePartitionName);
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
PartitionInfo partitionInfo = targetTable.getPartitionInfo();
if (partitionInfo.isRangePartition() || partitionInfo.getType() == PartitionType.LIST) {
targetTable.replaceTempPartitions(
Arrays.asList(sourcePartitionName), Arrays.asList(tmpPartitionName), true, false);
} else if (partitionInfo instanceof SinglePartitionInfo) {
targetTable.replacePartition(sourcePartitionName, tmpPartitionName);
} else {
throw new AlterCancelException("partition type " + partitionInfo.getType() + " is not supported");
}
ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), targetTable.getId(),
Arrays.asList(sourcePartitionName), Arrays.asList(tmpPartitionName),
true, false, partitionInfo instanceof SinglePartitionInfo);
GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info);
sourceTablets.forEach(GlobalStateMgr.getCurrentState().getTabletInvertedIndex()::markTabletForceDelete);
try {
GlobalStateMgr.getCurrentState().getColocateTableIndex().updateLakeTableColocationInfo(targetTable,
true /* isJoin */, null /* expectGroupId */);
} catch (DdlException e) {
LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, e.getMessage());
}
targetTable.lastSchemaUpdateTime.set(System.nanoTime());
LOG.info("optimize job {} finish replace partitions dbId:{}, tableId:{},"
+ "source partition:{}, tmp partition:{}",
jobId, dbId, tableId, sourcePartitionName, tmpPartitionName);
} catch (Exception e) {
allPartitionOptimized = false;
LOG.warn("optimize table failed dbId:{}, tableId:{} exception: {}", dbId, tableId, DebugUtil.getStackTrace(e));
throw new AlterCancelException("optimize table failed " + e.getMessage());
} finally {
disableDoubleWritePartition(db, targetTable);
}
}
/**
* cancelImpl() can be called any time any place.
* We need to clean any possible residual of this job.
*/
@Override
protected synchronized boolean cancelImpl(String errMsg) {
if (jobState.isFinalState()) {
return false;
}
cancelInternal();
jobState = JobState.CANCELLED;
this.errMsg = errMsg;
this.finishedTimeMs = System.currentTimeMillis();
LOG.info("cancel {} job {}, err: {}", this.type, jobId, errMsg);
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
span.setStatus(StatusCode.ERROR, errMsg);
span.end();
return true;
}
private void cancelInternal() {
Database db = null;
Locker locker = new Locker();
try {
db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("database id:" + dbId + " does not exist");
}
if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) {
throw new AlterCancelException("insert overwrite commit failed because locking db:" + dbId + " failed");
}
} catch (Exception e) {
LOG.warn("get and write lock database failed when cancel job: {}", jobId, e);
return;
}
try {
Table table = db.getTable(tableId);
if (table == null) {
throw new AlterCancelException("table:" + tableId + " does not exist in database:" + db.getFullName());
}
Preconditions.checkState(table instanceof OlapTable);
OlapTable targetTable = (OlapTable) table;
disableDoubleWritePartition(db, targetTable);
Set<Tablet> sourceTablets = Sets.newHashSet();
if (getTmpPartitionIds() != null) {
for (long pid : getTmpPartitionIds()) {
LOG.info("optimize job {} drop temp partition:{}", jobId, pid);
Partition partition = targetTable.getPartition(pid);
if (partition != null) {
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
targetTable.dropTempPartition(partition.getName(), true);
} else {
LOG.warn("partition {} is null", pid);
}
}
}
sourceTablets.forEach(GlobalStateMgr.getCurrentState().getTabletInvertedIndex()::markTabletForceDelete);
targetTable.setState(OlapTableState.NORMAL);
} catch (Exception e) {
LOG.warn("exception when cancel optimize job.", e);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
protected boolean isPreviousLoadFinished() throws AnalysisException {
return GlobalStateMgr.getCurrentState().getGlobalTransactionMgr()
.isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId));
}
/**
* Replay job in PENDING state.
* Should replay all changes before this job's state transfers to PENDING.
*/
private void replayPending(OnlineOptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
return;
}
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
return;
}
tbl.setState(OlapTableState.SCHEMA_CHANGE);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
this.jobState = JobState.PENDING;
this.watershedTxnId = replayedJob.watershedTxnId;
this.optimizeOperation = replayedJob.optimizeOperation;
LOG.info("replay pending optimize job: {}", jobId);
}
/**
* Replay job in WAITING_TXN state.
* Should replay all changes in runPendingJob()
*/
private void replayWaitingTxn(OnlineOptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
return;
}
OlapTable tbl = null;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
return;
}
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
for (long id : replayedJob.getTmpPartitionIds()) {
tmpPartitionIds.add(id);
}
this.jobState = JobState.WAITING_TXN;
this.watershedTxnId = replayedJob.watershedTxnId;
this.optimizeOperation = replayedJob.optimizeOperation;
LOG.info("replay waiting txn optimize job: {}", jobId);
}
private void onReplayFinished(OnlineOptimizeJobV2 replayedJob, OlapTable targetTable) {
this.sourcePartitionNames = replayedJob.sourcePartitionNames;
this.tmpPartitionNames = replayedJob.tmpPartitionNames;
this.allPartitionOptimized = replayedJob.allPartitionOptimized;
this.optimizeOperation = replayedJob.optimizeOperation;
Set<Tablet> sourceTablets = Sets.newHashSet();
for (long id : replayedJob.getTmpPartitionIds()) {
Partition partition = targetTable.getPartition(id);
if (partition != null) {
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
targetTable.dropTempPartition(partition.getName(), true);
}
}
sourceTablets.forEach(GlobalStateMgr.getCurrentState().getTabletInvertedIndex()::markTabletForceDelete);
if (allPartitionOptimized) {
this.distributionInfo = replayedJob.distributionInfo;
LOG.debug("set distribution info to table: {}", distributionInfo);
targetTable.setDefaultDistributionInfo(distributionInfo);
}
targetTable.setState(OlapTableState.NORMAL);
LOG.info("finish replay optimize job {} dbId:{}, tableId:{},"
+ "source partitions:{}, tmp partitions:{}, allOptimized:{}",
jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized);
}
/**
* Replay job in FINISHED state.
* Should replay all changes in runRunningJob()
*/
private void replayFinished(OnlineOptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db != null) {
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl != null) {
onReplayFinished(replayedJob, tbl);
}
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
this.jobState = JobState.FINISHED;
this.finishedTimeMs = replayedJob.finishedTimeMs;
LOG.info("replay finished optimize job: {}", jobId);
}
/**
* Replay job in CANCELLED state.
*/
private void replayCancelled(OnlineOptimizeJobV2 replayedJob) {
cancelInternal();
this.jobState = JobState.CANCELLED;
this.finishedTimeMs = replayedJob.finishedTimeMs;
this.errMsg = replayedJob.errMsg;
LOG.info("replay cancelled optimize job: {}", jobId);
}
@Override
public void replay(AlterJobV2 replayedJob) {
OnlineOptimizeJobV2 replayedOptimizeJob = (OnlineOptimizeJobV2) replayedJob;
switch (replayedJob.jobState) {
case PENDING:
replayPending(replayedOptimizeJob);
break;
case WAITING_TXN:
replayWaitingTxn(replayedOptimizeJob);
break;
case FINISHED:
replayFinished(replayedOptimizeJob);
break;
case CANCELLED:
replayCancelled(replayedOptimizeJob);
break;
default:
break;
}
}
@Override
protected void getInfo(List<List<Comparable>> infos) {
List<Comparable> info = Lists.newArrayList();
info.add(jobId);
info.add(tableName);
info.add(TimeUtils.longToTimeString(createTimeMs));
info.add(TimeUtils.longToTimeString(finishedTimeMs));
info.add(optimizeOperation != null ? optimizeOperation : "");
info.add(watershedTxnId);
info.add(jobState.name());
info.add(errMsg);
info.add(progress);
info.add(timeoutMs / 1000);
infos.add(info);
}
public void setJobState(JobState jobState) {
this.jobState = jobState;
}
@Override
public void write(DataOutput out) throws IOException {
String json = GsonUtils.GSON.toJson(this, OnlineOptimizeJobV2.class);
Text.writeString(out, json);
}
@Override
public void gsonPostProcess() throws IOException {
if (jobState != JobState.PENDING) {
return;
}
}
@Override
public Optional<Long> getTransactionId() {
return watershedTxnId < 0 ? Optional.empty() : Optional.of(watershedTxnId);
}
protected void executeSql(String sql) throws Exception {
LOG.info("execute sql : {}", sql);
ConnectContext context = ConnectContext.get();
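// Alter jobs run on internal scheduler threads that usually have no session bound, so
// fall back to a root-privileged context to execute the rewrite statement.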
if (context == null) {
context = new ConnectContext();
context.setGlobalStateMgr(GlobalStateMgr.getCurrentState());
context.setCurrentUserIdentity(UserIdentity.ROOT);
context.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID));
context.setQualifiedUser(UserIdentity.ROOT.getUser());
context.setThreadLocalInfo();
}
StatementBase parsedStmt = SqlParser.parseOneWithStarRocksDialect(sql, context.getSessionVariable());
if (parsedStmt instanceof InsertStmt) {
((InsertStmt) parsedStmt).setIsVersionOverwrite(true);
}
StmtExecutor executor = new StmtExecutor(context, parsedStmt);
SessionVariable sessionVariable = context.getSessionVariable();
sessionVariable.setUsePageCache(false);
sessionVariable.setEnableMaterializedViewRewrite(false);
context.setExecutor(executor);
context.setQueryId(UUIDUtil.genUUID());
context.setStartTime();
executor.execute();
if (context.getState().getStateType() == QueryState.MysqlStateType.ERR) {
LOG.warn("Execute sql fail | Error Message [{}] | {} | SQL [{}]",
context.getState().getErrorMessage(), DebugUtil.printId(context.getQueryId()), sql);
throw new DdlException(context.getState().getErrorMessage());
}
}
}
|
class OnlineOptimizeJobV2 extends AlterJobV2 {
private static final Logger LOG = LogManager.getLogger(OnlineOptimizeJobV2.class);
@SerializedName(value = "watershedTxnId")
protected long watershedTxnId = -1;
private final String postfix;
private static final ExecutorService EXECUTOR = Executors.newCachedThreadPool();
private Future<Constants.TaskRunState> future = null;
@SerializedName(value = "tmpPartitionIds")
private List<Long> tmpPartitionIds = Lists.newArrayList();
private OptimizeClause optimizeClause;
private String dbName = "";
private Map<String, String> properties = Maps.newHashMap();
@SerializedName(value = "rewriteTasks")
private List<OptimizeTask> rewriteTasks = Lists.newArrayList();
private int progress = 0;
@SerializedName(value = "sourcePartitionNames")
private List<String> sourcePartitionNames = Lists.newArrayList();
@SerializedName(value = "tmpPartitionNames")
private List<String> tmpPartitionNames = Lists.newArrayList();
@SerializedName(value = "allPartitionOptimized")
private Boolean allPartitionOptimized = false;
@SerializedName(value = "distributionInfo")
private DistributionInfo distributionInfo;
@SerializedName(value = "optimizeOperation")
private String optimizeOperation = "";
public OnlineOptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs,
OptimizeClause optimizeClause) {
this(jobId, dbId, tableId, tableName, timeoutMs);
this.optimizeClause = optimizeClause;
}
public OnlineOptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs) {
super(jobId, JobType.OPTIMIZE, dbId, tableId, tableName, timeoutMs);
this.postfix = "_" + jobId;
}
public List<Long> getTmpPartitionIds() {
return tmpPartitionIds;
}
public void setTmpPartitionIds(List<Long> tmpPartitionIds) {
this.tmpPartitionIds = tmpPartitionIds;
}
public String getName() {
return "online-optimize-" + this.postfix;
}
public Map<String, String> getProperties() {
return properties;
}
public List<OptimizeTask> getOptimizeTasks() {
return rewriteTasks;
}
private OlapTable checkAndGetTable(Database db, long tableId) throws AlterCancelException {
Table table = db.getTable(tableId);
if (table == null) {
throw new AlterCancelException("table: " + tableId + " does not exist in database: " + db.getFullName());
}
Preconditions.checkState(table instanceof OlapTable);
return (OlapTable) table;
}
/**
* runPendingJob():
* 1. Create all temp partitions and wait for them to finish.
* 2. Get a new transaction id, then set job's state to WAITING_TXN
*/
@Override
protected void runPendingJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.PENDING, jobState);
LOG.info("begin to send create temp partitions. job: {}", jobId);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Database " + dbId + " does not exist");
}
if (!checkTableStable(db)) {
return;
}
if (optimizeClause == null) {
throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
}
if (optimizeClause.isTableOptimize()) {
allPartitionOptimized = true;
}
for (int i = 0; i < optimizeClause.getSourcePartitionIds().size(); ++i) {
tmpPartitionIds.add(GlobalStateMgr.getCurrentState().getNextId());
}
long createPartitionStartTimestamp = System.currentTimeMillis();
OlapTable targetTable;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
targetTable = checkAndGetTable(db, tableId);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
try {
PartitionUtils.createAndAddTempPartitionsForTable(db, targetTable, postfix,
optimizeClause.getSourcePartitionIds(), getTmpPartitionIds(), optimizeClause.getDistributionDesc(),
warehouseId);
LOG.debug("create temp partitions {} success. job: {}", getTmpPartitionIds(), jobId);
} catch (Exception e) {
LOG.warn("create temp partitions failed", e);
throw new AlterCancelException("create temp partitions failed " + e);
}
long createPartitionElapse = System.currentTimeMillis() - createPartitionStartTimestamp;
this.jobState = JobState.WAITING_TXN;
this.optimizeOperation = optimizeClause.toString();
span.setAttribute("createPartitionElapse", createPartitionElapse);
span.setAttribute("watershedTxnId", this.watershedTxnId);
span.addEvent("setWaitingTxn");
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
LOG.info("transfer optimize job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId);
}
/**
* runWaitingTxnJob():
* 1. Wait for the transactions before the watershedTxnId to finish.
* 2. If all previous transactions finished, start inserting data into the temp partitions.
* 3. Change job state to RUNNING.
*/
private void enableDoubleWritePartition(Database db, OlapTable tbl, String sourcePartitionName, String tmpPartitionName) {
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
Preconditions.checkState(tbl.getState() == OlapTableState.SCHEMA_CHANGE);
tbl.addDoubleWritePartition(sourcePartitionName, tmpPartitionName);
LOG.info("job {} add double write partition {} to {}", jobId, tmpPartitionName, sourcePartitionName);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
private void disableDoubleWritePartition(Database db, OlapTable tbl) {
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
Preconditions.checkState(tbl.getState() == OlapTableState.SCHEMA_CHANGE);
tbl.clearDoubleWritePartition();
LOG.info("job {} clear double write partitions", jobId);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
/**
* runRunningJob()
* 1. Wait for the insert tasks to finish.
* 2. Replace partitions with temp partitions.
* 3. Set the job's state to FINISHED.
*/
@Override
protected void runRunningJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.RUNNING, jobState);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Database " + dbId + " does not exist");
}
OlapTable tbl;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
tbl = checkAndGetTable(db, tableId);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
int progress = 0;
for (OptimizeTask rewriteTask : rewriteTasks) {
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED
|| rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.SUCCESS) {
progress += 100 / rewriteTasks.size();
continue;
}
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.PENDING) {
enableDoubleWritePartition(db, tbl, rewriteTask.getPartitionName(), rewriteTask.getTempPartitionName());
this.watershedTxnId = GlobalStateMgr.getCurrentState()
.getGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId();
rewriteTask.setOptimizeTaskState(Constants.TaskRunState.RUNNING);
}
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.RUNNING) {
try {
if (!isPreviousLoadFinished()) {
LOG.info("wait transactions before {} to be finished, optimize job: {}", watershedTxnId, jobId);
return;
}
} catch (AnalysisException e) {
throw new AlterCancelException(e.getMessage());
}
if (this.future != null) {
if (this.future.isDone()) {
try {
rewriteTask.setOptimizeTaskState(future.get());
} catch (InterruptedException | ExecutionException e) {
LOG.warn("get rewrite task result failed", e);
rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED);
}
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED) {
this.allPartitionOptimized = false;
}
this.future = null;
} else {
LOG.info("wait rewrite task {} to be finished, optimize job: {}", rewriteTask.getName(), jobId);
return;
}
} else {
LOG.info("previous transactions are all finished, begin to optimize task {}. job: {}",
rewriteTask.toString(), jobId);
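// Run the rewrite INSERT asynchronously on the shared executor; runRunningJob() returns
// right away and polls this.future on the next scheduling round instead of blocking.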
Callable<Constants.TaskRunState> task = () -> {
try {
executeSql(rewriteTask.getDefinition());
LOG.info("finish rewrite task: {}", rewriteTask.getName());
return Constants.TaskRunState.SUCCESS;
} catch (Exception e) {
LOG.warn("create rewrite task failed", e);
return Constants.TaskRunState.FAILED;
}
};
this.future = EXECUTOR.submit(task);
return;
}
}
locker.lockDatabase(db, LockType.WRITE);
try {
onTaskFinished(db, tbl, rewriteTask);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
locker.lockDatabase(db, LockType.WRITE);
try {
onFinished(db, tbl);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
LOG.debug("all insert overwrite tasks finished, optimize job: {}", jobId);
this.progress = 100;
this.jobState = JobState.FINISHED;
this.finishedTimeMs = System.currentTimeMillis();
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
LOG.info("optimize job finished: {}", jobId);
this.span.end();
}
@Override
protected void runFinishedRewritingJob() {
}
private void onFinished(Database db, OlapTable targetTable) throws AlterCancelException {
try {
if (allPartitionOptimized && optimizeClause.getDistributionDesc() != null) {
this.distributionInfo = optimizeClause.getDistributionDesc().toDistributionInfo(targetTable.getColumns());
targetTable.setDefaultDistributionInfo(distributionInfo);
}
targetTable.setState(OlapTableState.NORMAL);
} catch (Exception e) {
LOG.warn("optimize table failed dbId:{}, tableId:{} exception: {}", dbId, tableId, e);
throw new AlterCancelException("optimize table failed " + e.getMessage());
}
}
private void onTaskFinished(Database db, OlapTable targetTable, OptimizeTask rewriteTask) throws AlterCancelException {
try {
String sourcePartitionName = rewriteTask.getPartitionName();
String tmpPartitionName = rewriteTask.getTempPartitionName();
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED) {
LOG.info("optimize job {} rewrite task {} state {} failed on partition {}",
jobId, rewriteTask.getName(), rewriteTask.getOptimizeTaskState(), sourcePartitionName);
targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true);
throw new AlterCancelException(sourcePartitionName + " rewrite task execute failed");
}
Set<Tablet> sourceTablets = Sets.newHashSet();
Partition partition = targetTable.getPartition(sourcePartitionName);
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
PartitionInfo partitionInfo = targetTable.getPartitionInfo();
if (partitionInfo.isRangePartition() || partitionInfo.getType() == PartitionType.LIST) {
targetTable.replaceTempPartitions(
Arrays.asList(sourcePartitionName), Arrays.asList(tmpPartitionName), true, false);
} else if (partitionInfo instanceof SinglePartitionInfo) {
targetTable.replacePartition(sourcePartitionName, tmpPartitionName);
} else {
throw new AlterCancelException("partition type " + partitionInfo.getType() + " is not supported");
}
ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), targetTable.getId(),
Arrays.asList(sourcePartitionName), Arrays.asList(tmpPartitionName),
true, false, partitionInfo instanceof SinglePartitionInfo);
GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info);
sourceTablets.forEach(GlobalStateMgr.getCurrentState().getTabletInvertedIndex()::markTabletForceDelete);
try {
GlobalStateMgr.getCurrentState().getColocateTableIndex().updateLakeTableColocationInfo(targetTable,
true /* isJoin */, null /* expectGroupId */);
} catch (DdlException e) {
LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, e.getMessage());
}
targetTable.lastSchemaUpdateTime.set(System.nanoTime());
LOG.info("optimize job {} finish replace partitions dbId:{}, tableId:{},"
+ "source partition:{}, tmp partition:{}",
jobId, dbId, tableId, sourcePartitionName, tmpPartitionName);
} catch (Exception e) {
allPartitionOptimized = false;
LOG.warn("optimize table failed dbId:{}, tableId:{} exception: {}", dbId, tableId, DebugUtil.getStackTrace(e));
throw new AlterCancelException("optimize table failed " + e.getMessage());
} finally {
disableDoubleWritePartition(db, targetTable);
}
}
/**
* cancelImpl() can be called any time any place.
* We need to clean any possible residual of this job.
*/
@Override
protected synchronized boolean cancelImpl(String errMsg) {
if (jobState.isFinalState()) {
return false;
}
cancelInternal();
jobState = JobState.CANCELLED;
this.errMsg = errMsg;
this.finishedTimeMs = System.currentTimeMillis();
LOG.info("cancel {} job {}, err: {}", this.type, jobId, errMsg);
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
span.setStatus(StatusCode.ERROR, errMsg);
span.end();
return true;
}
private void cancelInternal() {
Database db = null;
Locker locker = new Locker();
try {
db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("database id:" + dbId + " does not exist");
}
if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) {
throw new AlterCancelException("insert overwrite commit failed because locking db:" + dbId + " failed");
}
} catch (Exception e) {
LOG.warn("get and write lock database failed when cancel job: {}", jobId, e);
return;
}
try {
Table table = db.getTable(tableId);
if (table == null) {
throw new AlterCancelException("table:" + tableId + " does not exist in database:" + db.getFullName());
}
Preconditions.checkState(table instanceof OlapTable);
OlapTable targetTable = (OlapTable) table;
disableDoubleWritePartition(db, targetTable);
Set<Tablet> tmpTablets = Sets.newHashSet();
if (getTmpPartitionIds() != null) {
for (long pid : getTmpPartitionIds()) {
LOG.info("optimize job {} drop temp partition:{}", jobId, pid);
Partition partition = targetTable.getPartition(pid);
if (partition != null) {
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
tmpTablets.addAll(index.getTablets());
}
targetTable.dropTempPartition(partition.getName(), true);
} else {
LOG.warn("partition {} is null", pid);
}
}
}
tmpTablets.forEach(GlobalStateMgr.getCurrentState().getTabletInvertedIndex()::markTabletForceDelete);
targetTable.setState(OlapTableState.NORMAL);
} catch (Exception e) {
LOG.warn("exception when cancel optimize job.", e);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
protected boolean isPreviousLoadFinished() throws AnalysisException {
return GlobalStateMgr.getCurrentState().getGlobalTransactionMgr()
.isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId));
}
/**
* Replay job in PENDING state.
* Should replay all changes before this job's state transfers to PENDING.
*/
private void replayPending(OnlineOptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
return;
}
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
return;
}
tbl.setState(OlapTableState.SCHEMA_CHANGE);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
this.jobState = JobState.PENDING;
this.watershedTxnId = replayedJob.watershedTxnId;
this.optimizeOperation = replayedJob.optimizeOperation;
LOG.info("replay pending optimize job: {}", jobId);
}
/**
* Replay job in WAITING_TXN state.
* Should replay all changes in runPendingJob()
*/
private void replayWaitingTxn(OnlineOptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
return;
}
OlapTable tbl = null;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
return;
}
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
for (long id : replayedJob.getTmpPartitionIds()) {
tmpPartitionIds.add(id);
}
this.jobState = JobState.WAITING_TXN;
this.watershedTxnId = replayedJob.watershedTxnId;
this.optimizeOperation = replayedJob.optimizeOperation;
LOG.info("replay waiting txn optimize job: {}", jobId);
}
private void onReplayFinished(OnlineOptimizeJobV2 replayedJob, OlapTable targetTable) {
this.sourcePartitionNames = replayedJob.sourcePartitionNames;
this.tmpPartitionNames = replayedJob.tmpPartitionNames;
this.allPartitionOptimized = replayedJob.allPartitionOptimized;
this.optimizeOperation = replayedJob.optimizeOperation;
Set<Tablet> sourceTablets = Sets.newHashSet();
for (long id : replayedJob.getTmpPartitionIds()) {
Partition partition = targetTable.getPartition(id);
if (partition != null) {
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
targetTable.dropTempPartition(partition.getName(), true);
}
}
sourceTablets.forEach(GlobalStateMgr.getCurrentState().getTabletInvertedIndex()::markTabletForceDelete);
if (allPartitionOptimized) {
this.distributionInfo = replayedJob.distributionInfo;
LOG.debug("set distribution info to table: {}", distributionInfo);
targetTable.setDefaultDistributionInfo(distributionInfo);
}
targetTable.setState(OlapTableState.NORMAL);
LOG.info("finish replay optimize job {} dbId:{}, tableId:{},"
+ "source partitions:{}, tmp partitions:{}, allOptimized:{}",
jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized);
}
/**
* Replay job in FINISHED state.
* Should replay all changes in runRunningJob()
*/
private void replayFinished(OnlineOptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db != null) {
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl != null) {
onReplayFinished(replayedJob, tbl);
}
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
this.jobState = JobState.FINISHED;
this.finishedTimeMs = replayedJob.finishedTimeMs;
LOG.info("replay finished optimize job: {}", jobId);
}
/**
* Replay job in CANCELLED state.
*/
private void replayCancelled(OnlineOptimizeJobV2 replayedJob) {
cancelInternal();
this.jobState = JobState.CANCELLED;
this.finishedTimeMs = replayedJob.finishedTimeMs;
this.errMsg = replayedJob.errMsg;
LOG.info("replay cancelled optimize job: {}", jobId);
}
@Override
public void replay(AlterJobV2 replayedJob) {
OnlineOptimizeJobV2 replayedOptimizeJob = (OnlineOptimizeJobV2) replayedJob;
switch (replayedJob.jobState) {
case PENDING:
replayPending(replayedOptimizeJob);
break;
case WAITING_TXN:
replayWaitingTxn(replayedOptimizeJob);
break;
case FINISHED:
replayFinished(replayedOptimizeJob);
break;
case CANCELLED:
replayCancelled(replayedOptimizeJob);
break;
default:
break;
}
}
@Override
protected void getInfo(List<List<Comparable>> infos) {
List<Comparable> info = Lists.newArrayList();
info.add(jobId);
info.add(tableName);
info.add(TimeUtils.longToTimeString(createTimeMs));
info.add(TimeUtils.longToTimeString(finishedTimeMs));
info.add(optimizeOperation != null ? optimizeOperation : "");
info.add(watershedTxnId);
info.add(jobState.name());
info.add(errMsg);
info.add(progress);
info.add(timeoutMs / 1000);
infos.add(info);
}
public void setJobState(JobState jobState) {
this.jobState = jobState;
}
@Override
public void write(DataOutput out) throws IOException {
String json = GsonUtils.GSON.toJson(this, OnlineOptimizeJobV2.class);
Text.writeString(out, json);
}
@Override
public Optional<Long> getTransactionId() {
return watershedTxnId < 0 ? Optional.empty() : Optional.of(watershedTxnId);
}
protected void executeSql(String sql) throws Exception {
LOG.info("execute sql : {}", sql);
ConnectContext context = ConnectContext.get();
if (context == null) {
context = new ConnectContext();
context.setGlobalStateMgr(GlobalStateMgr.getCurrentState());
context.setCurrentUserIdentity(UserIdentity.ROOT);
context.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID));
context.setQualifiedUser(UserIdentity.ROOT.getUser());
context.setThreadLocalInfo();
}
StatementBase parsedStmt = SqlParser.parseOneWithStarRocksDialect(sql, context.getSessionVariable());
if (parsedStmt instanceof InsertStmt) {
((InsertStmt) parsedStmt).setIsVersionOverwrite(true);
}
StmtExecutor executor = new StmtExecutor(context, parsedStmt);
SessionVariable sessionVariable = context.getSessionVariable();
sessionVariable.setUsePageCache(false);
sessionVariable.setEnableMaterializedViewRewrite(false);
context.setExecutor(executor);
context.setQueryId(UUIDUtil.genUUID());
context.setStartTime();
executor.execute();
if (context.getState().getStateType() == QueryState.MysqlStateType.ERR) {
LOG.warn("Execute sql fail | Error Message [{}] | {} | SQL [{}]",
context.getState().getErrorMessage(), DebugUtil.printId(context.getQueryId()), sql);
throw new AlterCancelException(context.getState().getErrorMessage());
}
}
}
|
Susceptible to SQL injection attack: the offer resource id is concatenated directly into the query string; bind it as a parameter instead.
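A minimal sketch of the parameterized form (using the SqlQuerySpec/SqlParameter types that appear in the revised method below; the literal resource id here is a hypothetical stand-in): the value travels as a bound parameter instead of being spliced into the query text, so it can never alter the query's structure.

// hypothetical value standing in for cosmosContainerResponse.resourceSettings().resourceId()
String resourceId = "someResourceId";
SqlQuerySpec querySpec = new SqlQuerySpec(
        "select * from c where c.offerResourceId = @OFFER_RESOURCE_ID", // placeholder, not concatenation
        new SqlParameterList(new SqlParameter("@OFFER_RESOURCE_ID", resourceId)));
// pass querySpec to queryOffers(...) in place of the concatenated string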
|
public Mono<Integer> readMinThroughput() {
return this.read().flatMap(cosmosContainerResponse -> database.getDocClientWrapper()
.queryOffers("select * from c where c.offerResourceId = '"
+ cosmosContainerResponse.resourceSettings().resourceId() + "'", new FeedOptions())
.single()).flatMap(offerFeedResponse -> {
if (offerFeedResponse.results().isEmpty()) {
return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST,
"No offers found for the resource"));
}
return database.getDocClientWrapper().readOffer(offerFeedResponse.results().get(0).selfLink())
.single();
}).map(cosmosOfferResponse -> Integer.parseInt(cosmosOfferResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.OFFER_MIN_THROUGHPUT)));
}
|
+ cosmosContainerResponse.resourceSettings().resourceId() + "'", new FeedOptions())
|
public Mono<Integer> readMinThroughput() {
return this.read().flatMap(cosmosContainerResponse -> database.getDocClientWrapper()
.queryOffers(
new SqlQuerySpec("select * from c where c.offerResourceId = @OFFER_RESOURCE_ID",
new SqlParameterList(new SqlParameter("@OFFER_RESOURCE_ID", cosmosContainerResponse.resourceSettings().resourceId()))), new FeedOptions())
.single()).flatMap(offerFeedResponse -> {
if (offerFeedResponse.results().isEmpty()) {
return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST,
"No offers found for the resource"));
}
return database.getDocClientWrapper().readOffer(offerFeedResponse.results().get(0).selfLink())
.single();
}).map(cosmosOfferResponse -> Integer.parseInt(cosmosOfferResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.OFFER_MIN_THROUGHPUT)));
}
|
class CosmosContainer {
private CosmosDatabase database;
private String id;
private CosmosScripts scripts;
CosmosContainer(String id, CosmosDatabase database) {
this.id = id;
this.database = database;
}
/**
* Get the id of the {@link CosmosContainer}
*
* @return the id of the {@link CosmosContainer}
*/
public String id() {
return id;
}
/**
* Set the id of the {@link CosmosContainer}
*
* @param id the id of the {@link CosmosContainer}
* @return the same {@link CosmosContainer} that had the id set
*/
CosmosContainer id(String id) {
this.id = id;
return this;
}
/**
* Reads the document container
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response with
* the read container. In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single cosmos container response with
* the read container or an error.
*/
public Mono<CosmosContainerResponse> read() {
return read(new CosmosContainerRequestOptions());
}
/**
* Reads the document container by the container link.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response with
* the read container. In case of failure the {@link Mono} will error.
*
* @param options The cosmos container request options.
* @return an {@link Mono} containing the single cosmos container response with
* the read container or an error.
*/
public Mono<CosmosContainerResponse> read(CosmosContainerRequestOptions options) {
if (options == null) {
options = new CosmosContainerRequestOptions();
}
return database.getDocClientWrapper().readCollection(getLink(), options.toRequestOptions())
.map(response -> new CosmosContainerResponse(response, database)).single();
}
/**
* Deletes the item container
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response for the
* deleted container. In case of failure the {@link Mono} will error.
*
* @param options the request options.
* @return an {@link Mono} containing the single cosmos container response for
* the deleted container or an error.
*/
public Mono<CosmosContainerResponse> delete(CosmosContainerRequestOptions options) {
if (options == null) {
options = new CosmosContainerRequestOptions();
}
return database.getDocClientWrapper().deleteCollection(getLink(), options.toRequestOptions())
.map(response -> new CosmosContainerResponse(response, database)).single();
}
/**
* Deletes the item container
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response for the
* deleted container. In case of failure the {@link Mono} will error.
*
* @return an {@link Mono} containing the single cosmos container response for
* the deleted container or an error.
*/
public Mono<CosmosContainerResponse> delete() {
return delete(new CosmosContainerRequestOptions());
}
/**
* Replaces a document container.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response with
* the replaced document container. In case of failure the {@link Mono} will
* error.
*
* @param containerSettings the item container properties
* @return an {@link Mono} containing the single cosmos container response with
* the replaced document container or an error.
*/
public Mono<CosmosContainerResponse> replace(CosmosContainerProperties containerSettings) {
return replace(containerSettings, null);
}
/**
* Replaces a document container.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response with
* the replaced document container. In case of failure the {@link Mono} will
* error.
*
* @param containerSettings the item container properties
* @param options the cosmos container request options.
* @return an {@link Mono} containing the single cosmos container response with
* the replaced document container or an error.
*/
public Mono<CosmosContainerResponse> replace(CosmosContainerProperties containerSettings,
CosmosContainerRequestOptions options) {
validateResource(containerSettings);
if (options == null) {
options = new CosmosContainerRequestOptions();
}
return database.getDocClientWrapper()
.replaceCollection(containerSettings.getV2Collection(), options.toRequestOptions())
.map(response -> new CosmosContainerResponse(response, database)).single();
}
/* CosmosItem operations */
/**
* Creates a cosmos item.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single resource response with the
* created cosmos item. In case of failure the {@link Mono} will error.
*
* @param item the cosmos item represented as a POJO or cosmos item object.
* @return an {@link Mono} containing the single resource response with the
* created cosmos item or an error.
*/
public Mono<CosmosItemResponse> createItem(Object item) {
return createItem(item, new CosmosItemRequestOptions());
}
/**
* Creates a cosmos item.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single resource response with the
* created cosmos item. In case of failure the {@link Mono} will error.
*
* @param item the cosmos item represented as a POJO or cosmos item object.
* @param options the request options.
* @return an {@link Mono} containing the single resource response with the
* created cosmos item or an error.
*/
public Mono<CosmosItemResponse> createItem(Object item, CosmosItemRequestOptions options) {
if (options == null) {
options = new CosmosItemRequestOptions();
}
RequestOptions requestOptions = options.toRequestOptions();
return database.getDocClientWrapper()
.createDocument(getLink(), CosmosItemProperties.fromObject(item), requestOptions, true)
.map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), this)).single();
}
/**
* Upserts an item.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single resource response with the
* upserted item. In case of failure the {@link Mono} will error.
*
* @param item the item represented as a POJO or Item object to upsert.
* @return an {@link Mono} containing the single resource response with the
* upserted document or an error.
*/
public Mono<CosmosItemResponse> upsertItem(Object item) {
return upsertItem(item, null);
}
/**
* Upserts a cosmos item.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single resource response with the
* upserted item. In case of failure the {@link Mono} will error.
*
* @param item the item represented as a POJO or Item object to upsert.
* @param options the request options.
* @return an {@link Mono} containing the single resource response with the
* upserted document or an error.
*/
public Mono<CosmosItemResponse> upsertItem(Object item, CosmosItemRequestOptions options) {
if (options == null) {
options = new CosmosItemRequestOptions();
}
RequestOptions requestOptions = options.toRequestOptions();
return this.getDatabase().getDocClientWrapper()
.upsertDocument(this.getLink(), CosmosItemProperties.fromObject(item), options.toRequestOptions(), true)
.map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), this)).single();
}
/**
* Reads all cosmos items in the container.
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed response of the read cosmos items. In case of
* failure the {@link Flux} will error.
*
* @return an {@link Flux} containing one or several feed response pages of the
* read cosmos items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> readAllItems() {
return readAllItems(new FeedOptions());
}
/**
* Reads all cosmos items in a container.
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed response of the read cosmos items. In case of
* failure the {@link Flux} will error.
*
* @param options the feed options.
* @return an {@link Flux} containing one or several feed response pages of the
* read cosmos items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> readAllItems(FeedOptions options) {
return getDatabase().getDocClientWrapper().readDocuments(getLink(), options).map(
response -> BridgeInternal.createFeedResponse(CosmosItemProperties.getFromV2Results(response.results()),
response.responseHeaders()));
}
/**
* Query for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed response of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param query the query.
* @return an {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryItems(String query) {
return queryItems(new SqlQuerySpec(query), null);
}
/**
* Query for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed response of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param query the query.
* @param options the feed options.
* @return an {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryItems(String query, FeedOptions options) {
return queryItems(new SqlQuerySpec(query), options);
}
/**
* Query for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed response of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param querySpec the SQL query specification.
* @return an {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryItems(SqlQuerySpec querySpec) {
return queryItems(querySpec, null);
}
/**
* Query for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed response of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param querySpec the SQL query specification.
* @param options the feed options.
* @return an {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryItems(SqlQuerySpec querySpec, FeedOptions options) {
return getDatabase().getDocClientWrapper().queryDocuments(getLink(), querySpec, options)
.map(response -> BridgeInternal.createFeedResponseWithQueryMetrics(
CosmosItemProperties.getFromV2Results(response.results()), response.responseHeaders(),
response.queryMetrics()));
}
/**
* Query for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed response of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param changeFeedOptions the feed options.
* @return an {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) {
return getDatabase().getDocClientWrapper().queryDocumentChangeFeed(getLink(), changeFeedOptions)
.map(response -> new FeedResponse<CosmosItemProperties>(
CosmosItemProperties.getFromV2Results(response.results()), response.responseHeaders(), false));
}
/**
* Gets a CosmosItem object without making a service call
*
* @param id id of the item
* @param partitionKey the partition key
* @return a cosmos item
*/
public CosmosItem getItem(String id, Object partitionKey) {
return new CosmosItem(id, partitionKey, this);
}
public CosmosScripts getScripts() {
if (this.scripts == null) {
this.scripts = new CosmosScripts(this);
}
return this.scripts;
}
/**
* Lists all the conflicts in the container
*
* @param options the feed options
* @return a {@link Flux} containing one or several feed response pages of the
* obtained conflicts or an error.
*/
public Flux<FeedResponse<CosmosConflictProperties>> readAllConflicts(FeedOptions options) {
return database.getDocClientWrapper().readConflicts(getLink(), options)
.map(response -> BridgeInternal.createFeedResponse(
CosmosConflictProperties.getFromV2Results(response.results()), response.responseHeaders()));
}
/**
* Queries all the conflicts in the container
*
* @param query the query
* @return a {@link Flux} containing one or several feed response pages of the
* obtained conflicts or an error.
*/
public Flux<FeedResponse<CosmosConflictProperties>> queryConflicts(String query) {
return queryConflicts(query, null);
}
/**
* Queries all the conflicts in the container
*
* @param query the query
* @param options the feed options
* @return a {@link Flux} containing one or several feed response pages of the
* obtained conflicts or an error.
*/
public Flux<FeedResponse<CosmosConflictProperties>> queryConflicts(String query, FeedOptions options) {
return database.getDocClientWrapper().queryConflicts(getLink(), query, options)
.map(response -> BridgeInternal.createFeedResponse(
CosmosConflictProperties.getFromV2Results(response.results()), response.responseHeaders()));
}
/**
* Gets a CosmosConflict object without making a service call
*
* @param id id of the cosmos conflict
* @return a cosmos conflict
*/
public CosmosConflict getConflict(String id) {
return new CosmosConflict(id, this);
}
/**
* Gets the throughput of the container
*
* @return a {@link Mono} containing throughput or an error.
*/
public Mono<Integer> readProvisionedThroughput() {
return this.read().flatMap(cosmosContainerResponse -> database.getDocClientWrapper()
.queryOffers("select * from c where c.offerResourceId = '"
+ cosmosContainerResponse.resourceSettings().resourceId() + "'", new FeedOptions())
.single()).flatMap(offerFeedResponse -> {
if (offerFeedResponse.results().isEmpty()) {
return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST,
"No offers found for the resource"));
}
return database.getDocClientWrapper().readOffer(offerFeedResponse.results().get(0).selfLink())
.single();
}).map(cosmosOfferResponse -> cosmosOfferResponse.getResource().getThroughput());
}
/**
* Gets the min throughput to which this container can be scaled down
*
* @return a {@link Mono} containing min throughput or an error.
*/
/**
* Sets the throughput provisioned for a container, measured in
* Request Units per second, in the Azure Cosmos service.
*
* @param requestUnitsPerSecond the cosmos container throughput, expressed in
* Request Units per second
* @return a {@link Mono} containing throughput or an error.
*/
public Mono<Integer> replaceProvisionedThroughput(int requestUnitsPerSecond) {
return this.read().flatMap(cosmosContainerResponse -> database.getDocClientWrapper()
.queryOffers("select * from c where c.offerResourceId = '"
+ cosmosContainerResponse.resourceSettings().resourceId() + "'", new FeedOptions())
.single()).flatMap(offerFeedResponse -> {
if (offerFeedResponse.results().isEmpty()) {
return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST,
"No offers found for the resource"));
}
Offer offer = offerFeedResponse.results().get(0);
offer.setThroughput(requestUnitsPerSecond);
return database.getDocClientWrapper().replaceOffer(offer).single();
}).map(offerResourceResponse -> offerResourceResponse.getResource().getThroughput());
}
/**
* Gets the parent Database
*
* @return the {@link CosmosDatabase}
*/
public CosmosDatabase getDatabase() {
return database;
}
String URIPathSegment() {
return Paths.COLLECTIONS_PATH_SEGMENT;
}
String parentLink() {
return database.getLink();
}
String getLink() {
StringBuilder builder = new StringBuilder();
builder.append(parentLink());
builder.append("/");
builder.append(URIPathSegment());
builder.append("/");
builder.append(id());
return builder.toString();
}
}
|
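For orientation, here is a minimal usage sketch of the conflict-query API shown above. It assumes an already-built CosmosContainer named container; the id() accessor on the returned properties is assumed to come from the shared Resource base type and is not shown in the snippet itself.
// Hedged sketch: page through conflicts reactively. "container" is assumed
// to be an existing CosmosContainer; FeedOptions defaults are used.
FeedOptions options = new FeedOptions();
container.queryConflicts("SELECT * FROM c", options)
.flatMapIterable(FeedResponse::results)
.subscribe(conflict -> System.out.println("conflict id: " + conflict.id()));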
class CosmosContainer {
private CosmosDatabase database;
private String id;
private CosmosScripts scripts;
CosmosContainer(String id, CosmosDatabase database) {
this.id = id;
this.database = database;
}
/**
* Get the id of the {@link CosmosContainer}
*
* @return the id of the {@link CosmosContainer}
*/
public String id() {
return id;
}
/**
* Set the id of the {@link CosmosContainer}
*
* @param id the id of the {@link CosmosContainer}
* @return the same {@link CosmosContainer} that had the id set
*/
CosmosContainer id(String id) {
this.id = id;
return this;
}
/**
* Reads the document container
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response with
* the read container. In case of failure the {@link Mono} will error.
*
* @return a {@link Mono} containing the single cosmos container response with
* the read container or an error.
*/
public Mono<CosmosContainerResponse> read() {
return read(new CosmosContainerRequestOptions());
}
/**
* Reads the document container by the container link.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response with
* the read container. In case of failure the {@link Mono} will error.
*
* @param options The cosmos container request options.
* @return a {@link Mono} containing the single cosmos container response with
* the read container or an error.
*/
public Mono<CosmosContainerResponse> read(CosmosContainerRequestOptions options) {
if (options == null) {
options = new CosmosContainerRequestOptions();
}
return database.getDocClientWrapper().readCollection(getLink(), options.toRequestOptions())
.map(response -> new CosmosContainerResponse(response, database)).single();
}
/**
* Deletes the item container
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response for the
* deleted container. In case of failure the {@link Mono} will error.
*
* @param options the request options.
* @return a {@link Mono} containing the single cosmos container response for
* the deleted container or an error.
*/
public Mono<CosmosContainerResponse> delete(CosmosContainerRequestOptions options) {
if (options == null) {
options = new CosmosContainerRequestOptions();
}
return database.getDocClientWrapper().deleteCollection(getLink(), options.toRequestOptions())
.map(response -> new CosmosContainerResponse(response, database)).single();
}
/**
* Deletes the item container
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response for the
* deleted container. In case of failure the {@link Mono} will error.
*
* @return a {@link Mono} containing the single cosmos container response for
* the deleted container or an error.
*/
public Mono<CosmosContainerResponse> delete() {
return delete(new CosmosContainerRequestOptions());
}
/**
* Replaces a document container.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response with
* the replaced document container. In case of failure the {@link Mono} will
* error.
*
* @param containerSettings the item container properties
* @return a {@link Mono} containing the single cosmos container response with
* the replaced document container or an error.
*/
public Mono<CosmosContainerResponse> replace(CosmosContainerProperties containerSettings) {
return replace(containerSettings, null);
}
/**
* Replaces a document container.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single cosmos container response with
* the replaced document container. In case of failure the {@link Mono} will
* error.
*
* @param containerSettings the item container properties
* @param options the cosmos container request options.
* @return a {@link Mono} containing the single cosmos container response with
* the replaced document container or an error.
*/
public Mono<CosmosContainerResponse> replace(CosmosContainerProperties containerSettings,
CosmosContainerRequestOptions options) {
validateResource(containerSettings);
if (options == null) {
options = new CosmosContainerRequestOptions();
}
return database.getDocClientWrapper()
.replaceCollection(containerSettings.getV2Collection(), options.toRequestOptions())
.map(response -> new CosmosContainerResponse(response, database)).single();
}
/* CosmosItem operations */
/**
* Creates a cosmos item.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single resource response with the
* created cosmos item. In case of failure the {@link Mono} will error.
*
* @param item the cosmos item represented as a POJO or cosmos item object.
* @return a {@link Mono} containing the single resource response with the
* created cosmos item or an error.
*/
public Mono<CosmosItemResponse> createItem(Object item) {
return createItem(item, new CosmosItemRequestOptions());
}
/**
* Creates a cosmos item.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single resource response with the
* created cosmos item. In case of failure the {@link Mono} will error.
*
* @param item the cosmos item represented as a POJO or cosmos item object.
* @param options the request options.
* @return a {@link Mono} containing the single resource response with the
* created cosmos item or an error.
*/
public Mono<CosmosItemResponse> createItem(Object item, CosmosItemRequestOptions options) {
if (options == null) {
options = new CosmosItemRequestOptions();
}
RequestOptions requestOptions = options.toRequestOptions();
return database.getDocClientWrapper()
.createDocument(getLink(), CosmosItemProperties.fromObject(item), requestOptions, true)
.map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), this)).single();
}
/**
* Upserts an item.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single resource response with the
* upserted item. In case of failure the {@link Mono} will error.
*
* @param item the item represented as a POJO or Item object to upsert.
* @return a {@link Mono} containing the single resource response with the
* upserted document or an error.
*/
public Mono<CosmosItemResponse> upsertItem(Object item) {
return upsertItem(item, null);
}
/**
* Upserts a cosmos item.
*
* After subscription the operation will be performed. The {@link Mono} upon
* successful completion will contain a single resource response with the
* upserted item. In case of failure the {@link Mono} will error.
*
* @param item the item represented as a POJO or Item object to upsert.
* @param options the request options.
* @return a {@link Mono} containing the single resource response with the
* upserted document or an error.
*/
public Mono<CosmosItemResponse> upsertItem(Object item, CosmosItemRequestOptions options) {
if (options == null) {
options = new CosmosItemRequestOptions();
}
RequestOptions requestOptions = options.toRequestOptions();
return this.getDatabase().getDocClientWrapper()
.upsertDocument(this.getLink(), CosmosItemProperties.fromObject(item), options.toRequestOptions(), true)
.map(response -> new CosmosItemResponse(response, requestOptions.getPartitionKey(), this)).single();
}
/**
* Reads all cosmos items in the container.
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed responses of the read cosmos items. In case of
* failure the {@link Flux} will error.
*
* @return a {@link Flux} containing one or several feed response pages of the
* read cosmos items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> readAllItems() {
return readAllItems(new FeedOptions());
}
/**
* Reads all cosmos items in a container.
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed responses of the read cosmos items. In case of
* failure the {@link Flux} will error.
*
* @param options the feed options.
* @return a {@link Flux} containing one or several feed response pages of the
* read cosmos items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> readAllItems(FeedOptions options) {
return getDatabase().getDocClientWrapper().readDocuments(getLink(), options).map(
response -> BridgeInternal.createFeedResponse(CosmosItemProperties.getFromV2Results(response.results()),
response.responseHeaders()));
}
/**
* Queries for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed responses of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param query the query.
* @return a {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryItems(String query) {
return queryItems(new SqlQuerySpec(query), null);
}
/**
* Queries for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed responses of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param query the query.
* @param options the feed options.
* @return a {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryItems(String query, FeedOptions options) {
return queryItems(new SqlQuerySpec(query), options);
}
/**
* Queries for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed responses of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param querySpec the SQL query specification.
* @return a {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryItems(SqlQuerySpec querySpec) {
return queryItems(querySpec, null);
}
/**
* Queries for items in a container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed responses of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param querySpec the SQL query specification.
* @param options the feed options.
* @return a {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryItems(SqlQuerySpec querySpec, FeedOptions options) {
return getDatabase().getDocClientWrapper().queryDocuments(getLink(), querySpec, options)
.map(response -> BridgeInternal.createFeedResponseWithQueryMetrics(
CosmosItemProperties.getFromV2Results(response.results()), response.responseHeaders(),
response.queryMetrics()));
}
/**
* Queries for items in the change feed of the container
*
* After subscription the operation will be performed. The {@link Flux} will
* contain one or several feed responses of the obtained items. In case of
* failure the {@link Flux} will error.
*
* @param changeFeedOptions the change feed options.
* @return a {@link Flux} containing one or several feed response pages of the
* obtained items or an error.
*/
public Flux<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) {
return getDatabase().getDocClientWrapper().queryDocumentChangeFeed(getLink(), changeFeedOptions)
.map(response -> new FeedResponse<CosmosItemProperties>(
CosmosItemProperties.getFromV2Results(response.results()), response.responseHeaders(), false));
}
/**
* Gets a CosmosItem object without making a service call
*
* @param id id of the item
* @param partitionKey the partition key
* @return a cosmos item
*/
public CosmosItem getItem(String id, Object partitionKey) {
return new CosmosItem(id, partitionKey, this);
}
public CosmosScripts getScripts() {
if (this.scripts == null) {
this.scripts = new CosmosScripts(this);
}
return this.scripts;
}
/**
* Lists all the conflicts in the container
*
* @param options the feed options
* @return a {@link Flux} containing one or several feed response pages of the
* obtained conflicts or an error.
*/
public Flux<FeedResponse<CosmosConflictProperties>> readAllConflicts(FeedOptions options) {
return database.getDocClientWrapper().readConflicts(getLink(), options)
.map(response -> BridgeInternal.createFeedResponse(
CosmosConflictProperties.getFromV2Results(response.results()), response.responseHeaders()));
}
/**
* Queries all the conflicts in the container
*
* @param query the query
* @return a {@link Flux} containing one or several feed response pages of the
* obtained conflicts or an error.
*/
public Flux<FeedResponse<CosmosConflictProperties>> queryConflicts(String query) {
return queryConflicts(query, null);
}
/**
* Queries all the conflicts in the container
*
* @param query the query
* @param options the feed options
* @return a {@link Flux} containing one or several feed response pages of the
* obtained conflicts or an error.
*/
public Flux<FeedResponse<CosmosConflictProperties>> queryConflicts(String query, FeedOptions options) {
return database.getDocClientWrapper().queryConflicts(getLink(), query, options)
.map(response -> BridgeInternal.createFeedResponse(
CosmosConflictProperties.getFromV2Results(response.results()), response.responseHeaders()));
}
/**
* Gets a CosmosConflict object without making a service call
*
* @param id id of the cosmos conflict
* @return a cosmos conflict
*/
public CosmosConflict getConflict(String id) {
return new CosmosConflict(id, this);
}
/**
* Gets the throughput of the container
*
* @return a {@link Mono} containing throughput or an error.
*/
public Mono<Integer> readProvisionedThroughput() {
return this.read().flatMap(cosmosContainerResponse -> database.getDocClientWrapper()
.queryOffers("select * from c where c.offerResourceId = '"
+ cosmosContainerResponse.resourceSettings().resourceId() + "'", new FeedOptions())
.single()).flatMap(offerFeedResponse -> {
if (offerFeedResponse.results().isEmpty()) {
return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST,
"No offers found for the resource"));
}
return database.getDocClientWrapper().readOffer(offerFeedResponse.results().get(0).selfLink())
.single();
}).map(cosmosOfferResponse -> cosmosOfferResponse.getResource().getThroughput());
}
/**
* Sets the throughput provisioned for a container, measured in
* Request Units per second (RU/s), in the Azure Cosmos service.
*
* @param requestUnitsPerSecond the cosmos container throughput, expressed in
* Request Units per second
* @return a {@link Mono} containing throughput or an error.
*/
public Mono<Integer> replaceProvisionedThroughput(int requestUnitsPerSecond) {
return this.read().flatMap(cosmosContainerResponse -> database.getDocClientWrapper()
.queryOffers("select * from c where c.offerResourceId = '"
+ cosmosContainerResponse.resourceSettings().resourceId() + "'", new FeedOptions())
.single()).flatMap(offerFeedResponse -> {
if (offerFeedResponse.results().isEmpty()) {
return Mono.error(BridgeInternal.createCosmosClientException(HttpConstants.StatusCodes.BADREQUEST,
"No offers found for the resource"));
}
Offer offer = offerFeedResponse.results().get(0);
offer.setThroughput(requestUnitsPerSecond);
return database.getDocClientWrapper().replaceOffer(offer).single();
}).map(offerResourceResponse -> offerResourceResponse.getResource().getThroughput());
}
/**
* Gets the parent Database
*
* @return the {@link CosmosDatabase}
*/
public CosmosDatabase getDatabase() {
return database;
}
String URIPathSegment() {
return Paths.COLLECTIONS_PATH_SEGMENT;
}
String parentLink() {
return database.getLink();
}
String getLink() {
StringBuilder builder = new StringBuilder();
builder.append(parentLink());
builder.append("/");
builder.append(URIPathSegment());
builder.append("/");
builder.append(id());
return builder.toString();
}
}
|
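A short, hedged usage sketch of the throughput helpers defined above; it assumes the database exposes a getContainer(String) accessor and that blocking is acceptable in the calling context. Names are illustrative only.
// Read the current RU/s, then double it.
CosmosContainer container = database.getContainer("orders");
Integer currentRu = container.readProvisionedThroughput().block();
Integer newRu = container.replaceProvisionedThroughput(currentRu * 2).block();
System.out.println("Scaled from " + currentRu + " to " + newRu + " RU/s");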
With Guava (use the vendored version if so) this can be simpler, but that's not blocking at all for this PR: `return Strings.isNullOrEmpty(options.getTwister2Home());`
|
private boolean isLocalMode(Twister2PipelineOptions options) {
if (options.getTwister2Home() == null || "".equals(options.getTwister2Home())) {
return true;
} else {
return false;
}
}
|
if (options.getTwister2Home() == null || "".equals(options.getTwister2Home())) {
|
private boolean isLocalMode(Twister2PipelineOptions options) {
if (options.getTwister2Home() == null || "".equals(options.getTwister2Home())) {
return true;
} else {
return false;
}
}
|
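A sketch of the reviewer's suggestion applied. Note that Beam normally uses its vendored Guava, so the plain com.google.common import below is an assumption and would be swapped for the vendored package path in practice; the import belongs at the top of the file.
import com.google.common.base.Strings; // vendored path differs in Beam proper

private boolean isLocalMode(Twister2PipelineOptions options) {
// isNullOrEmpty covers both the null case and the empty-string case.
return Strings.isNullOrEmpty(options.getTwister2Home());
}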
class Twister2Runner extends PipelineRunner<PipelineResult> {
private static final Logger LOG = Logger.getLogger(Twister2Runner.class.getName());
private static final String SIDEINPUTS = "sideInputs";
private static final String LEAVES = "leaves";
private static final String GRAPH = "graph";
/** Provided options. */
private final Twister2PipelineOptions options;
protected Twister2Runner(Twister2PipelineOptions options) {
this.options = options;
}
public static Twister2Runner fromOptions(PipelineOptions options) {
Twister2PipelineOptions pipelineOptions =
PipelineOptionsValidator.validate(Twister2PipelineOptions.class, options);
if (pipelineOptions.getFilesToStage() == null) {
pipelineOptions.setFilesToStage(
detectClassPathResourcesToStage(Twister2Runner.class.getClassLoader(), pipelineOptions));
LOG.info(
"PipelineOptions.filesToStage was not specified. "
+ "Defaulting to files from the classpath: will stage "
+ pipelineOptions.getFilesToStage().size()
+ " files. Enable logging at DEBUG level to see which files will be staged.");
}
return new Twister2Runner(pipelineOptions);
}
@Override
public PipelineResult run(Pipeline pipeline) {
Twister2PipelineExecutionEnvironment env = new Twister2PipelineExecutionEnvironment(options);
LOG.info("Translating pipeline to Twister2 program.");
pipeline.replaceAll(getDefaultOverrides());
SplittableParDo.validateNoPrimitiveReads(pipeline);
env.translate(pipeline);
setupSystem(options);
Map<String, Object> configMap = new HashMap<>();
JobConfig jobConfig = new JobConfig();
if (isLocalMode(options)) {
options.setParallelism(1);
configMap.put(SIDEINPUTS, extractNames(env.getSideInputs()));
configMap.put(LEAVES, extractNames(env.getLeaves()));
configMap.put(GRAPH, env.getTSetGraph());
configMap.put("twister2.network.buffer.size", 32000);
configMap.put("twister2.network.sendBuffer.count", options.getParallelism());
LOG.warning("Twister2 Local Mode currently only supports single worker");
} else {
jobConfig.put(SIDEINPUTS, extractNames(env.getSideInputs()));
jobConfig.put(LEAVES, extractNames(env.getLeaves()));
jobConfig.put(GRAPH, env.getTSetGraph());
}
Config config = ResourceAllocator.loadConfig(configMap);
int workers = options.getParallelism();
Twister2Job twister2Job =
Twister2Job.newBuilder()
.setJobName(options.getJobName())
.setWorkerClass(BeamBatchWorker.class)
.addComputeResource(options.getWorkerCPUs(), options.getRamMegaBytes(), workers)
.setConfig(jobConfig)
.build();
Twister2JobState jobState;
if (isLocalMode(options)) {
jobState = LocalSubmitter.submitJob(twister2Job, config);
} else {
jobState = Twister2Submitter.submitJob(twister2Job, config);
}
Twister2PipelineResult result = new Twister2PipelineResult(jobState);
return result;
}
/** Runs the pipeline in Twister2 local mode; intended for test pipelines only. */
public PipelineResult runTest(Pipeline pipeline) {
Twister2PipelineExecutionEnvironment env = new Twister2PipelineExecutionEnvironment(options);
LOG.info("Translating pipeline to Twister2 program.");
pipeline.replaceAll(getDefaultOverrides());
SplittableParDo.validateNoPrimitiveReads(pipeline);
env.translate(pipeline);
setupSystemTest(options);
Map<String, Object> configMap = new HashMap<>();
configMap.put(SIDEINPUTS, extractNames(env.getSideInputs()));
configMap.put(LEAVES, extractNames(env.getLeaves()));
configMap.put(GRAPH, env.getTSetGraph());
configMap.put("twister2.network.buffer.size", 32000);
configMap.put("twister2.network.sendBuffer.count", options.getParallelism());
Config config = ResourceAllocator.loadConfig(configMap);
JobConfig jobConfig = new JobConfig();
int workers = options.getParallelism();
Twister2Job twister2Job =
Twister2Job.newBuilder()
.setJobName(options.getJobName())
.setWorkerClass(BeamBatchWorker.class)
.addComputeResource(options.getWorkerCPUs(), options.getRamMegaBytes(), workers)
.setConfig(jobConfig)
.build();
Twister2JobState jobState = LocalSubmitter.submitJob(twister2Job, config);
Twister2PipelineResult result = new Twister2PipelineResult(jobState);
if (result.state == PipelineResult.State.FAILED) {
throw new RuntimeException("Pipeline execution failed", jobState.getCause());
}
return result;
}
private void setupSystem(Twister2PipelineOptions options) {
prepareFilesToStage(options);
zipFilesToStage(options);
System.setProperty("cluster_type", options.getClusterType());
System.setProperty("job_file", options.getJobFileZip());
System.setProperty("job_type", options.getJobType());
if (isLocalMode(options)) {
System.setProperty("twister2_home", System.getProperty("java.io.tmpdir"));
System.setProperty("config_dir", System.getProperty("java.io.tmpdir") + "/conf/");
} else {
System.setProperty("twister2_home", options.getTwister2Home());
System.setProperty("config_dir", options.getTwister2Home() + "/conf/");
File cDir = new File(System.getProperty("config_dir"), options.getClusterType());
String[] filesList =
new String[] {
"core.yaml", "network.yaml", "data.yaml", "resource.yaml", "task.yaml",
};
for (String file : filesList) {
File toCheck = new File(cDir, file);
if (!toCheck.exists()) {
throw new Twister2RuntimeException(
"Couldn't find " + file + " in config directory specified.");
}
}
FileInputStream fis = null;
try {
fis = new FileInputStream(new File(cDir, "logger.properties"));
LogManager.getLogManager().readConfiguration(fis);
fis.close();
} catch (IOException e) {
LOG.warning("Couldn't load logging configuration");
} finally {
if (fis != null) {
try {
fis.close();
} catch (IOException e) {
LOG.info(e.getMessage());
}
}
}
}
}
private void setupSystemTest(Twister2PipelineOptions options) {
prepareFilesToStage(options);
zipFilesToStage(options);
System.setProperty("cluster_type", options.getClusterType());
System.setProperty("twister2_home", System.getProperty("java.io.tmpdir"));
System.setProperty("job_file", options.getJobFileZip());
System.setProperty("job_type", options.getJobType());
}
private Set<String> extractNames(Set<TSet> leaves) {
Set<String> results = new HashSet<>();
for (TSet leaf : leaves) {
results.add(leaf.getId());
}
return results;
}
private Map<String, String> extractNames(Map<String, BatchTSet<?>> sideInputs) {
Map<String, String> results = new LinkedHashMap<>();
for (Map.Entry<String, BatchTSet<?>> entry : sideInputs.entrySet()) {
results.put(entry.getKey(), entry.getValue().getId());
}
return results;
}
/**
* The classpath may contain non-jar files (e.g. directories with .class files or empty
* directories) that would cause exceptions in the running log.
*/
private void prepareFilesToStage(Twister2PipelineOptions options) {
List<String> filesToStage =
options.getFilesToStage().stream()
.map(File::new)
.filter(File::exists)
.map(File::getAbsolutePath)
.collect(Collectors.toList());
options.setFilesToStage(
PipelineResources.prepareFilesForStaging(
filesToStage,
MoreObjects.firstNonNull(
options.getTempLocation(), System.getProperty("java.io.tmpdir"))));
}
/**
* Creates a single zip file from all the jar files that are listed as files to stage in options.
*
* @param options the pipeline options that list the files to stage
*/
private void zipFilesToStage(Twister2PipelineOptions options) {
File zipFile = null;
Set<String> jarSet = new HashSet<>();
List<String> filesToStage = options.getFilesToStage();
List<String> trimmed = new ArrayList<>();
for (String file : filesToStage) {
if (!file.contains("/org/twister2")) {
trimmed.add(file);
}
}
FileInputStream fis = null;
try {
zipFile = File.createTempFile("twister2-", ".zip");
FileOutputStream fos = new FileOutputStream(zipFile);
ZipOutputStream zipOut = new ZipOutputStream(fos);
zipOut.putNextEntry(new ZipEntry("lib/"));
for (String srcFile : trimmed) {
File fileToZip = new File(srcFile);
if (!jarSet.contains(fileToZip.getName())) {
jarSet.add(fileToZip.getName());
} else {
continue;
}
fis = new FileInputStream(fileToZip);
ZipEntry zipEntry = new ZipEntry("lib/" + fileToZip.getName());
zipOut.putNextEntry(zipEntry);
byte[] bytes = new byte[1024];
int length;
while ((length = fis.read(bytes)) >= 0) {
zipOut.write(bytes, 0, length);
}
fis.close();
}
zipOut.close();
fos.close();
zipFile.deleteOnExit();
} catch (IOException e) {
// FileNotFoundException is a subtype of IOException, so a single catch suffices.
LOG.info(e.getMessage());
} finally {
if (fis != null) {
try {
fis.close();
} catch (IOException e) {
LOG.info(e.getMessage());
}
}
}
if (zipFile != null) {
options.setJobFileZip(zipFile.getPath());
}
}
private static List<PTransformOverride> getDefaultOverrides() {
List<PTransformOverride> overrides =
ImmutableList.<PTransformOverride>builder()
.add(
PTransformOverride.of(
PTransformMatchers.splittableParDo(), new SplittableParDo.OverrideFactory()))
.add(
PTransformOverride.of(
PTransformMatchers.urnEqualTo(
PTransformTranslation.SPLITTABLE_PROCESS_KEYED_URN),
new SplittableParDoNaiveBounded.OverrideFactory()))
.build();
return overrides;
}
}
|
class Twister2Runner extends PipelineRunner<PipelineResult> {
private static final Logger LOG = Logger.getLogger(Twister2Runner.class.getName());
private static final String SIDEINPUTS = "sideInputs";
private static final String LEAVES = "leaves";
private static final String GRAPH = "graph";
/** Provided options. */
private final Twister2PipelineOptions options;
protected Twister2Runner(Twister2PipelineOptions options) {
this.options = options;
}
public static Twister2Runner fromOptions(PipelineOptions options) {
Twister2PipelineOptions pipelineOptions =
PipelineOptionsValidator.validate(Twister2PipelineOptions.class, options);
if (pipelineOptions.getFilesToStage() == null) {
pipelineOptions.setFilesToStage(
detectClassPathResourcesToStage(Twister2Runner.class.getClassLoader(), pipelineOptions));
LOG.info(
"PipelineOptions.filesToStage was not specified. "
+ "Defaulting to files from the classpath: will stage "
+ pipelineOptions.getFilesToStage().size()
+ " files. Enable logging at DEBUG level to see which files will be staged.");
}
return new Twister2Runner(pipelineOptions);
}
@Override
public PipelineResult run(Pipeline pipeline) {
Twister2PipelineExecutionEnvironment env = new Twister2PipelineExecutionEnvironment(options);
LOG.info("Translating pipeline to Twister2 program.");
pipeline.replaceAll(getDefaultOverrides());
SplittableParDo.validateNoPrimitiveReads(pipeline);
env.translate(pipeline);
setupSystem(options);
Map<String, Object> configMap = new HashMap<>();
JobConfig jobConfig = new JobConfig();
if (isLocalMode(options)) {
options.setParallelism(1);
configMap.put(SIDEINPUTS, extractNames(env.getSideInputs()));
configMap.put(LEAVES, extractNames(env.getLeaves()));
configMap.put(GRAPH, env.getTSetGraph());
configMap.put("twister2.network.buffer.size", 32000);
configMap.put("twister2.network.sendBuffer.count", options.getParallelism());
LOG.warning("Twister2 Local Mode currently only supports single worker");
} else {
jobConfig.put(SIDEINPUTS, extractNames(env.getSideInputs()));
jobConfig.put(LEAVES, extractNames(env.getLeaves()));
jobConfig.put(GRAPH, env.getTSetGraph());
}
Config config = ResourceAllocator.loadConfig(configMap);
int workers = options.getParallelism();
Twister2Job twister2Job =
Twister2Job.newBuilder()
.setJobName(options.getJobName())
.setWorkerClass(BeamBatchWorker.class)
.addComputeResource(options.getWorkerCPUs(), options.getRamMegaBytes(), workers)
.setConfig(jobConfig)
.build();
Twister2JobState jobState;
if (isLocalMode(options)) {
jobState = LocalSubmitter.submitJob(twister2Job, config);
} else {
jobState = Twister2Submitter.submitJob(twister2Job, config);
}
Twister2PipelineResult result = new Twister2PipelineResult(jobState);
return result;
}
/** Runs the pipeline in Twister2 local mode; intended for test pipelines only. */
public PipelineResult runTest(Pipeline pipeline) {
Twister2PipelineExecutionEnvironment env = new Twister2PipelineExecutionEnvironment(options);
LOG.info("Translating pipeline to Twister2 program.");
pipeline.replaceAll(getDefaultOverrides());
SplittableParDo.validateNoPrimitiveReads(pipeline);
env.translate(pipeline);
setupSystemTest(options);
Map<String, Object> configMap = new HashMap<>();
configMap.put(SIDEINPUTS, extractNames(env.getSideInputs()));
configMap.put(LEAVES, extractNames(env.getLeaves()));
configMap.put(GRAPH, env.getTSetGraph());
configMap.put("twister2.network.buffer.size", 32000);
configMap.put("twister2.network.sendBuffer.count", options.getParallelism());
Config config = ResourceAllocator.loadConfig(configMap);
JobConfig jobConfig = new JobConfig();
int workers = options.getParallelism();
Twister2Job twister2Job =
Twister2Job.newBuilder()
.setJobName(options.getJobName())
.setWorkerClass(BeamBatchWorker.class)
.addComputeResource(options.getWorkerCPUs(), options.getRamMegaBytes(), workers)
.setConfig(jobConfig)
.build();
Twister2JobState jobState = LocalSubmitter.submitJob(twister2Job, config);
Twister2PipelineResult result = new Twister2PipelineResult(jobState);
if (result.state == PipelineResult.State.FAILED) {
throw new RuntimeException("Pipeline execution failed", jobState.getCause());
}
return result;
}
private void setupSystem(Twister2PipelineOptions options) {
prepareFilesToStage(options);
zipFilesToStage(options);
System.setProperty("cluster_type", options.getClusterType());
System.setProperty("job_file", options.getJobFileZip());
System.setProperty("job_type", options.getJobType());
if (isLocalMode(options)) {
System.setProperty("twister2_home", System.getProperty("java.io.tmpdir"));
System.setProperty("config_dir", System.getProperty("java.io.tmpdir") + "/conf/");
} else {
System.setProperty("twister2_home", options.getTwister2Home());
System.setProperty("config_dir", options.getTwister2Home() + "/conf/");
File cDir = new File(System.getProperty("config_dir"), options.getClusterType());
String[] filesList =
new String[] {
"core.yaml", "network.yaml", "data.yaml", "resource.yaml", "task.yaml",
};
for (String file : filesList) {
File toCheck = new File(cDir, file);
if (!toCheck.exists()) {
throw new Twister2RuntimeException(
"Couldn't find " + file + " in config directory specified.");
}
}
FileInputStream fis = null;
try {
fis = new FileInputStream(new File(cDir, "logger.properties"));
LogManager.getLogManager().readConfiguration(fis);
fis.close();
} catch (IOException e) {
LOG.warning("Couldn't load logging configuration");
} finally {
if (fis != null) {
try {
fis.close();
} catch (IOException e) {
LOG.info(e.getMessage());
}
}
}
}
}
private void setupSystemTest(Twister2PipelineOptions options) {
prepareFilesToStage(options);
zipFilesToStage(options);
System.setProperty("cluster_type", options.getClusterType());
System.setProperty("twister2_home", System.getProperty("java.io.tmpdir"));
System.setProperty("job_file", options.getJobFileZip());
System.setProperty("job_type", options.getJobType());
}
private Set<String> extractNames(Set<TSet> leaves) {
Set<String> results = new HashSet<>();
for (TSet leaf : leaves) {
results.add(leaf.getId());
}
return results;
}
private Map<String, String> extractNames(Map<String, BatchTSet<?>> sideInputs) {
Map<String, String> results = new LinkedHashMap<>();
for (Map.Entry<String, BatchTSet<?>> entry : sideInputs.entrySet()) {
results.put(entry.getKey(), entry.getValue().getId());
}
return results;
}
/**
* The classpath may contain non-jar files (e.g. directories with .class files or empty
* directories) that would cause exceptions in the running log.
*/
private void prepareFilesToStage(Twister2PipelineOptions options) {
List<String> filesToStage =
options.getFilesToStage().stream()
.map(File::new)
.filter(File::exists)
.map(File::getAbsolutePath)
.collect(Collectors.toList());
options.setFilesToStage(
PipelineResources.prepareFilesForStaging(
filesToStage,
MoreObjects.firstNonNull(
options.getTempLocation(), System.getProperty("java.io.tmpdir"))));
}
/**
* Creates a single zip file from all the jar files that are listed as files to stage in options.
*
* @param options the pipeline options that list the files to stage
*/
private void zipFilesToStage(Twister2PipelineOptions options) {
File zipFile = null;
Set<String> jarSet = new HashSet<>();
List<String> filesToStage = options.getFilesToStage();
List<String> trimmed = new ArrayList<>();
for (String file : filesToStage) {
if (!file.contains("/org/twister2")) {
trimmed.add(file);
}
}
FileInputStream fis = null;
try {
zipFile = File.createTempFile("twister2-", ".zip");
FileOutputStream fos = new FileOutputStream(zipFile);
ZipOutputStream zipOut = new ZipOutputStream(fos);
zipOut.putNextEntry(new ZipEntry("lib/"));
for (String srcFile : trimmed) {
File fileToZip = new File(srcFile);
if (!jarSet.contains(fileToZip.getName())) {
jarSet.add(fileToZip.getName());
} else {
continue;
}
fis = new FileInputStream(fileToZip);
ZipEntry zipEntry = new ZipEntry("lib/" + fileToZip.getName());
zipOut.putNextEntry(zipEntry);
byte[] bytes = new byte[1024];
int length;
while ((length = fis.read(bytes)) >= 0) {
zipOut.write(bytes, 0, length);
}
fis.close();
}
zipOut.close();
fos.close();
zipFile.deleteOnExit();
} catch (IOException e) {
// FileNotFoundException is a subtype of IOException, so a single catch suffices.
LOG.info(e.getMessage());
} finally {
if (fis != null) {
try {
fis.close();
} catch (IOException e) {
LOG.info(e.getMessage());
}
}
}
if (zipFile != null) {
options.setJobFileZip(zipFile.getPath());
}
}
private static List<PTransformOverride> getDefaultOverrides() {
List<PTransformOverride> overrides =
ImmutableList.<PTransformOverride>builder()
.add(
PTransformOverride.of(
PTransformMatchers.splittableParDo(), new SplittableParDo.OverrideFactory()))
.add(
PTransformOverride.of(
PTransformMatchers.urnEqualTo(
PTransformTranslation.SPLITTABLE_PROCESS_KEYED_URN),
new SplittableParDoNaiveBounded.OverrideFactory()))
.build();
return overrides;
}
}
|
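As an aside, the stream handling in zipFilesToStage above can be tightened with try-with-resources; the following is a hedged sketch (names mirror the method, behavior is meant to be unchanged) rather than the actual patch, and it assumes the enclosing method declares throws IOException.
File zipFile = File.createTempFile("twister2-", ".zip");
try (FileOutputStream fos = new FileOutputStream(zipFile);
ZipOutputStream zipOut = new ZipOutputStream(fos)) {
zipOut.putNextEntry(new ZipEntry("lib/"));
for (String srcFile : trimmed) {
File fileToZip = new File(srcFile);
// Set.add returns false for duplicates, replacing the contains/add pair.
if (!jarSet.add(fileToZip.getName())) {
continue;
}
try (FileInputStream fis = new FileInputStream(fileToZip)) {
zipOut.putNextEntry(new ZipEntry("lib/" + fileToZip.getName()));
byte[] bytes = new byte[1024];
int length;
while ((length = fis.read(bytes)) >= 0) {
zipOut.write(bytes, 0, length);
}
}
}
}
zipFile.deleteOnExit();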
It looks like there is already an existing `StorageAccountSkuType.STANDARD_GRS`.
|
public Mono<FunctionApp> createAsync() {
if (this.isInCreateMode()) {
if (inner().serverFarmId() == null) {
withNewConsumptionPlan();
}
if (currentStorageAccount == null && storageAccountToSet == null && storageAccountCreatable == null) {
withNewStorageAccount(
this.manager().sdkContext().randomResourceName(name(), 20),
StorageAccountSkuType.fromSkuName(com.azure.resourcemanager.storage.models.SkuName.STANDARD_GRS));
}
}
return super.createAsync();
}
|
StorageAccountSkuType.fromSkuName(com.azure.resourcemanager.storage.models.SkuName.STANDARD_GRS));
|
public Mono<FunctionApp> createAsync() {
if (this.isInCreateMode()) {
if (inner().serverFarmId() == null) {
withNewConsumptionPlan();
}
if (currentStorageAccount == null && storageAccountToSet == null && storageAccountCreatable == null) {
withNewStorageAccount(
this.manager().sdkContext().randomResourceName(name(), 20),
StorageAccountSkuType.STANDARD_GRS);
}
}
return super.createAsync();
}
|
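For context on why the two forms are interchangeable: StorageAccountSkuType predefines the common SKUs, roughly as below. The exact field definition is an assumption about the SDK source, not copied from it; fromSkuName is the only factory visible in the snippet above.
// Assumed shape of the predefined constant inside StorageAccountSkuType.
public static final StorageAccountSkuType STANDARD_GRS =
StorageAccountSkuType.fromSkuName(
com.azure.resourcemanager.storage.models.SkuName.STANDARD_GRS);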
class FunctionAppImpl
extends AppServiceBaseImpl<
FunctionApp, FunctionAppImpl, FunctionApp.DefinitionStages.WithCreate, FunctionApp.Update>
implements FunctionApp,
FunctionApp.Definition,
FunctionApp.DefinitionStages.NewAppServicePlanWithGroup,
FunctionApp.DefinitionStages.ExistingLinuxPlanWithGroup,
FunctionApp.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private static final String SETTING_FUNCTIONS_WORKER_RUNTIME = "FUNCTIONS_WORKER_RUNTIME";
private static final String SETTING_FUNCTIONS_EXTENSION_VERSION = "FUNCTIONS_EXTENSION_VERSION";
private static final String SETTING_WEBSITE_CONTENTAZUREFILECONNECTIONSTRING =
"WEBSITE_CONTENTAZUREFILECONNECTIONSTRING";
private static final String SETTING_WEBSITE_CONTENTSHARE = "WEBSITE_CONTENTSHARE";
private static final String SETTING_WEB_JOBS_STORAGE = "AzureWebJobsStorage";
private static final String SETTING_WEB_JOBS_DASHBOARD = "AzureWebJobsDashboard";
private Creatable<StorageAccount> storageAccountCreatable;
private StorageAccount storageAccountToSet;
private StorageAccount currentStorageAccount;
private final FunctionAppKeyService functionAppKeyService;
private FunctionService functionService;
private FunctionDeploymentSlots deploymentSlots;
private String functionAppKeyServiceHost;
private String functionServiceHost;
FunctionAppImpl(
final String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
AppServiceManager manager) {
super(name, innerObject, siteConfig, logConfig, manager);
functionAppKeyServiceHost = manager.environment().getResourceManagerEndpoint();
functionAppKeyService = RestProxy.create(FunctionAppKeyService.class, manager.httpPipeline());
if (!isInCreateMode()) {
initializeFunctionService();
}
}
private void initializeFunctionService() {
if (functionService == null) {
UrlBuilder urlBuilder = UrlBuilder.parse(this.defaultHostname());
String baseUrl;
if (urlBuilder.getScheme() == null) {
urlBuilder.setScheme("https");
}
try {
baseUrl = urlBuilder.toUrl().toString();
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
for (int i = 0, count = manager().httpPipeline().getPolicyCount(); i < count; ++i) {
HttpPipelinePolicy policy = manager().httpPipeline().getPolicy(i);
if (!(policy instanceof AuthenticationPolicy)
&& !(policy instanceof ProviderRegistrationPolicy)
&& !(policy instanceof AuxiliaryAuthenticationPolicy)) {
policies.add(policy);
}
}
policies.add(new FunctionAuthenticationPolicy(this));
HttpPipeline httpPipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(manager().httpPipeline().getHttpClient())
.build();
functionServiceHost = baseUrl;
functionService =
RestProxy.create(FunctionService.class, httpPipeline, new AzureJacksonAdapter());
}
}
@Override
public void setInner(SiteInner innerObject) {
super.setInner(innerObject);
}
@Override
public FunctionDeploymentSlots deploymentSlots() {
if (deploymentSlots == null) {
deploymentSlots = new FunctionDeploymentSlotsImpl(this);
}
return deploymentSlots;
}
@Override
public FunctionAppImpl withNewConsumptionPlan() {
return withNewAppServicePlan(OperatingSystem.WINDOWS, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewConsumptionPlan(String appServicePlanName) {
return withNewAppServicePlan(
appServicePlanName, OperatingSystem.WINDOWS, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withRuntime(String runtime) {
return withAppSetting(SETTING_FUNCTIONS_WORKER_RUNTIME, runtime);
}
@Override
public FunctionAppImpl withRuntimeVersion(String version) {
return withAppSetting(SETTING_FUNCTIONS_EXTENSION_VERSION, version.startsWith("~") ? version : "~" + version);
}
@Override
public FunctionAppImpl withLatestRuntimeVersion() {
return withRuntimeVersion("latest");
}
@Override
Mono<Indexable> submitAppSettings() {
if (storageAccountCreatable != null && this.taskResult(storageAccountCreatable.key()) != null) {
storageAccountToSet = this.taskResult(storageAccountCreatable.key());
}
if (storageAccountToSet == null) {
return super.submitAppSettings();
} else {
return Flux
.concat(
storageAccountToSet
.getKeysAsync()
.map(storageAccountKeys -> storageAccountKeys.get(0))
.zipWith(
this.manager().appServicePlans().getByIdAsync(this.appServicePlanId()),
(StorageAccountKey storageAccountKey, AppServicePlan appServicePlan) -> {
String connectionString = com.azure.resourcemanager.resources.fluentcore.utils.Utils
.getStorageConnectionString(storageAccountToSet.name(), storageAccountKey.value(),
manager().environment());
addAppSettingIfNotModified(SETTING_WEB_JOBS_STORAGE, connectionString);
addAppSettingIfNotModified(SETTING_WEB_JOBS_DASHBOARD, connectionString);
if (OperatingSystem.WINDOWS.equals(operatingSystem())
&&
(appServicePlan == null
|| isConsumptionOrPremiumAppServicePlan(appServicePlan.pricingTier()))) {
addAppSettingIfNotModified(
SETTING_WEBSITE_CONTENTAZUREFILECONNECTIONSTRING, connectionString);
addAppSettingIfNotModified(
SETTING_WEBSITE_CONTENTSHARE,
this.manager().sdkContext().randomResourceName(name(), 32));
}
return FunctionAppImpl.super.submitAppSettings();
}))
.last()
.then(
Mono
.fromCallable(
() -> {
currentStorageAccount = storageAccountToSet;
storageAccountToSet = null;
storageAccountCreatable = null;
return this;
}));
}
}
@Override
public OperatingSystem operatingSystem() {
return (inner().reserved() == null || !inner().reserved()) ? OperatingSystem.WINDOWS : OperatingSystem.LINUX;
}
private void addAppSettingIfNotModified(String key, String value) {
if (!appSettingModified(key)) {
withAppSetting(key, value);
}
}
private boolean appSettingModified(String key) {
return (appSettingsToAdd != null && appSettingsToAdd.containsKey(key))
|| (appSettingsToRemove != null && appSettingsToRemove.contains(key));
}
private static boolean isConsumptionOrPremiumAppServicePlan(PricingTier pricingTier) {
if (pricingTier == null || pricingTier.toSkuDescription() == null) {
return true;
}
SkuDescription description = pricingTier.toSkuDescription();
return SkuName.DYNAMIC.toString().equalsIgnoreCase(description.tier())
|| SkuName.ELASTIC_PREMIUM.toString().equalsIgnoreCase(description.tier());
}
@Override
FunctionAppImpl withNewAppServicePlan(OperatingSystem operatingSystem, PricingTier pricingTier) {
return super.withNewAppServicePlan(operatingSystem, pricingTier).autoSetAlwaysOn(pricingTier);
}
@Override
FunctionAppImpl withNewAppServicePlan(
String appServicePlan, OperatingSystem operatingSystem, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlan, operatingSystem, pricingTier).autoSetAlwaysOn(pricingTier);
}
@Override
public FunctionAppImpl withExistingAppServicePlan(AppServicePlan appServicePlan) {
super.withExistingAppServicePlan(appServicePlan);
return autoSetAlwaysOn(appServicePlan.pricingTier());
}
private FunctionAppImpl autoSetAlwaysOn(PricingTier pricingTier) {
SkuDescription description = pricingTier.toSkuDescription();
if (description.tier().equalsIgnoreCase(SkuName.BASIC.toString())
|| description.tier().equalsIgnoreCase(SkuName.STANDARD.toString())
|| description.tier().equalsIgnoreCase(SkuName.PREMIUM.toString())
|| description.tier().equalsIgnoreCase(SkuName.PREMIUM_V2.toString())) {
return withWebAppAlwaysOn(true);
} else {
return withWebAppAlwaysOn(false);
}
}
@Override
public FunctionAppImpl withNewStorageAccount(String name, StorageAccountSkuType sku) {
StorageAccount.DefinitionStages.WithGroup storageDefine =
manager().storageManager().storageAccounts().define(name).withRegion(regionName());
if (super.creatableGroup != null && isInCreateMode()) {
storageAccountCreatable =
storageDefine.withNewResourceGroup(super.creatableGroup).withGeneralPurposeAccountKind().withSku(sku);
} else {
storageAccountCreatable =
storageDefine
.withExistingResourceGroup(resourceGroupName())
.withGeneralPurposeAccountKind()
.withSku(sku);
}
this.addDependency(storageAccountCreatable);
return this;
}
@Override
public FunctionAppImpl withNewStorageAccount(Creatable<StorageAccount> storageAccount) {
storageAccountCreatable = storageAccount;
this.addDependency(storageAccountCreatable);
return this;
}
@Override
public FunctionAppImpl withExistingStorageAccount(StorageAccount storageAccount) {
this.storageAccountToSet = storageAccount;
return this;
}
@Override
public FunctionAppImpl withDailyUsageQuota(int quota) {
inner().withDailyMemoryTimeQuota(quota);
return this;
}
@Override
public FunctionAppImpl withoutDailyUsageQuota() {
return withDailyUsageQuota(0);
}
@Override
public FunctionAppImpl withNewLinuxConsumptionPlan() {
return withNewAppServicePlan(OperatingSystem.LINUX, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewLinuxConsumptionPlan(String appServicePlanName) {
return withNewAppServicePlan(
appServicePlanName, OperatingSystem.LINUX, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.LINUX, pricingTier);
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.LINUX, pricingTier);
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(Creatable<AppServicePlan> appServicePlanCreatable) {
super.withNewAppServicePlan(appServicePlanCreatable);
if (appServicePlanCreatable instanceof AppServicePlan) {
this.autoSetAlwaysOn(((AppServicePlan) appServicePlanCreatable).pricingTier());
}
return this;
}
@Override
public FunctionAppImpl withExistingLinuxAppServicePlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan).autoSetAlwaysOn(appServicePlan.pricingTier());
}
@Override
public FunctionAppImpl withBuiltInImage(final FunctionRuntimeStack runtimeStack) {
ensureLinuxPlan();
cleanUpContainerSettings();
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
withRuntime(runtimeStack.runtime());
withRuntimeVersion(runtimeStack.version());
siteConfig.withLinuxFxVersion(runtimeStack.getLinuxFxVersion());
return this;
}
@Override
public FunctionAppImpl withPublicDockerHubImage(String imageAndTag) {
ensureLinuxPlan();
return super.withPublicDockerHubImage(imageAndTag);
}
@Override
public FunctionAppImpl withPrivateDockerHubImage(String imageAndTag) {
ensureLinuxPlan();
return super.withPrivateDockerHubImage(imageAndTag);
}
@Override
public FunctionAppImpl withPrivateRegistryImage(String imageAndTag, String serverUrl) {
ensureLinuxPlan();
return super.withPrivateRegistryImage(imageAndTag, serverUrl);
}
@Override
protected void cleanUpContainerSettings() {
if (siteConfig != null && siteConfig.linuxFxVersion() != null) {
siteConfig.withLinuxFxVersion(null);
}
withoutAppSetting(SETTING_DOCKER_IMAGE);
withoutAppSetting(SETTING_REGISTRY_SERVER);
withoutAppSetting(SETTING_REGISTRY_USERNAME);
withoutAppSetting(SETTING_REGISTRY_PASSWORD);
}
@Override
protected OperatingSystem appServicePlanOperatingSystem(AppServicePlan appServicePlan) {
return (appServicePlan.inner().reserved() == null || !appServicePlan.inner().reserved())
? OperatingSystem.WINDOWS
: OperatingSystem.LINUX;
}
@Override
public StorageAccount storageAccount() {
return currentStorageAccount;
}
@Override
public String getMasterKey() {
return getMasterKeyAsync().block();
}
@Override
public Mono<String> getMasterKeyAsync() {
return FluxUtil
.withContext(
context ->
functionAppKeyService
.listKeys(
functionAppKeyServiceHost,
resourceGroupName(),
name(),
manager().subscriptionId(),
"2019-08-01"))
.map(ListKeysResult::getMasterKey)
.subscriberContext(
context -> context.putAll(FluxUtil.toReactorContext(this.manager().inner().getContext())));
}
@Override
public PagedIterable<FunctionEnvelope> listFunctions() {
return this.manager().functionApps().listFunctions(resourceGroupName(), name());
}
@Override
public Map<String, String> listFunctionKeys(String functionName) {
return listFunctionKeysAsync(functionName).block();
}
@Override
public Mono<Map<String, String>> listFunctionKeysAsync(final String functionName) {
return functionService
.listFunctionKeys(functionServiceHost, functionName)
.map(
result -> {
Map<String, String> keys = new HashMap<>();
if (result.keys != null) {
for (NameValuePair pair : result.keys) {
keys.put(pair.name(), pair.value());
}
}
return keys;
});
}
@Override
public NameValuePair addFunctionKey(String functionName, String keyName, String keyValue) {
return addFunctionKeyAsync(functionName, keyName, keyValue).block();
}
@Override
public Mono<NameValuePair> addFunctionKeyAsync(String functionName, String keyName, String keyValue) {
if (keyValue != null) {
return functionService
.addFunctionKey(
functionServiceHost,
functionName,
keyName,
new NameValuePair().withName(keyName).withValue(keyValue));
} else {
return functionService.generateFunctionKey(functionServiceHost, functionName, keyName);
}
}
@Override
public void removeFunctionKey(String functionName, String keyName) {
removeFunctionKeyAsync(functionName, keyName).block();
}
@Override
public Mono<Void> removeFunctionKeyAsync(String functionName, String keyName) {
return functionService.deleteFunctionKey(functionServiceHost, functionName, keyName);
}
@Override
public void triggerFunction(String functionName, Object payload) {
triggerFunctionAsync(functionName, payload).block();
}
@Override
public Mono<Void> triggerFunctionAsync(String functionName, Object payload) {
return functionService.triggerFunction(functionServiceHost, functionName, payload);
}
@Override
public void syncTriggers() {
syncTriggersAsync().block();
}
@Override
public Mono<Void> syncTriggersAsync() {
return manager()
.inner()
.getWebApps()
.syncFunctionTriggersAsync(resourceGroupName(), name())
.onErrorResume(
throwable -> {
if (throwable instanceof ManagementException
&& ((ManagementException) throwable).getResponse().getStatusCode() == 200) {
return Mono.empty();
} else {
return Mono.error(throwable);
}
});
}
@Override
public Flux<String> streamApplicationLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamApplicationLogsAsync());
}
@Override
public Flux<String> streamHttpLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamHttpLogsAsync());
}
@Override
public Flux<String> streamTraceLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamTraceLogsAsync());
}
@Override
public Flux<String> streamDeploymentLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamDeploymentLogsAsync());
}
@Override
public Flux<String> streamAllLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamAllLogsAsync());
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile) {
return kuduClient.zipDeployAsync(zipFile);
}
@Override
public void zipDeploy(InputStream zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public Mono<Void> afterPostRunAsync(final boolean isGroupFaulted) {
if (!isGroupFaulted) {
initializeFunctionService();
}
return super.afterPostRunAsync(isGroupFaulted);
}
private static class ListKeysResult {
@JsonProperty("masterKey")
private String masterKey;
@JsonProperty("functionKeys")
private Map<String, String> functionKeys;
@JsonProperty("systemKeys")
private Map<String, String> systemKeys;
public String getMasterKey() {
return masterKey;
}
}
@Host("{$host}")
@ServiceInterface(name = "FunctionAppKeyService")
private interface FunctionAppKeyService {
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps listKeys"
})
@Post(
"subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}"
+ "/host/default/listkeys")
Mono<ListKeysResult> listKeys(
@HostParam("$host") String host,
@PathParam("resourceGroupName") String resourceGroupName,
@PathParam("name") String name,
@PathParam("subscriptionId") String subscriptionId,
@QueryParam("api-version") String apiVersion);
}
@Host("{$host}")
@ServiceInterface(name = "FunctionService")
private interface FunctionService {
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps listFunctionKeys"
})
@Get("admin/functions/{name}/keys")
Mono<FunctionKeyListResult> listFunctionKeys(
@HostParam("$host") String host, @PathParam("name") String functionName);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps addFunctionKey"
})
@Put("admin/functions/{name}/keys/{keyName}")
Mono<NameValuePair> addFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName,
@BodyParam("application/json") NameValuePair key);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps generateFunctionKey"
})
@Post("admin/functions/{name}/keys/{keyName}")
Mono<NameValuePair> generateFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps deleteFunctionKey"
})
@Delete("admin/functions/{name}/keys/{keyName}")
Mono<Void> deleteFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps ping"
})
@Post("admin/host/ping")
Mono<Void> ping(@HostParam("$host") String host);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps getHostStatus"
})
@Get("admin/host/status")
Mono<Void> getHostStatus(@HostParam("$host") String host);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps triggerFunction"
})
@Post("admin/functions/{name}")
Mono<Void> triggerFunction(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@BodyParam("application/json") Object payload);
}
private static class FunctionKeyListResult {
@JsonProperty("keys")
private List<NameValuePair> keys;
}
/*
private static final class FunctionCredential implements TokenCredential {
private final FunctionAppImpl functionApp;
private FunctionCredential(FunctionAppImpl functionApp) {
this.functionApp = functionApp;
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
return functionApp.manager().inner().getWebApps()
.getFunctionsAdminTokenAsync(functionApp.resourceGroupName(), functionApp.name())
.map(token -> {
String jwt = new String(Base64.getUrlDecoder().decode(token.split("\\.")[1]));
Pattern pattern = Pattern.compile("\"exp\": *([0-9]+),");
Matcher matcher = pattern.matcher(jwt);
matcher.find();
long expire = Long.parseLong(matcher.group(1));
return new AccessToken(token, OffsetDateTime.ofInstant(
Instant.ofEpochMilli(expire), ZoneOffset.UTC));
});
}
}
*/
}
|
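To round off the class above, a hedged sketch of how the key and trigger helpers are typically driven from client code; the entry point, resource-group, app, and function names are placeholders, not taken from the source.
// Assumes an AzureResourceManager-style entry point named "azure".
FunctionApp app = azure.functionApps().getByResourceGroup("my-rg", "my-func-app");
String masterKey = app.getMasterKey();
Map<String, String> keys = app.listFunctionKeys("HttpTrigger1");
// Passing a null value asks the service to generate the key (see addFunctionKeyAsync).
NameValuePair generated = app.addFunctionKey("HttpTrigger1", "ci-key", null);
app.syncTriggers();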
class FunctionAppImpl
extends AppServiceBaseImpl<
FunctionApp, FunctionAppImpl, FunctionApp.DefinitionStages.WithCreate, FunctionApp.Update>
implements FunctionApp,
FunctionApp.Definition,
FunctionApp.DefinitionStages.NewAppServicePlanWithGroup,
FunctionApp.DefinitionStages.ExistingLinuxPlanWithGroup,
FunctionApp.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private static final String SETTING_FUNCTIONS_WORKER_RUNTIME = "FUNCTIONS_WORKER_RUNTIME";
private static final String SETTING_FUNCTIONS_EXTENSION_VERSION = "FUNCTIONS_EXTENSION_VERSION";
private static final String SETTING_WEBSITE_CONTENTAZUREFILECONNECTIONSTRING =
"WEBSITE_CONTENTAZUREFILECONNECTIONSTRING";
private static final String SETTING_WEBSITE_CONTENTSHARE = "WEBSITE_CONTENTSHARE";
private static final String SETTING_WEB_JOBS_STORAGE = "AzureWebJobsStorage";
private static final String SETTING_WEB_JOBS_DASHBOARD = "AzureWebJobsDashboard";
private Creatable<StorageAccount> storageAccountCreatable;
private StorageAccount storageAccountToSet;
private StorageAccount currentStorageAccount;
private final FunctionAppKeyService functionAppKeyService;
private FunctionService functionService;
private FunctionDeploymentSlots deploymentSlots;
private String functionAppKeyServiceHost;
private String functionServiceHost;
FunctionAppImpl(
final String name,
SiteInner innerObject,
SiteConfigResourceInner siteConfig,
SiteLogsConfigInner logConfig,
AppServiceManager manager) {
super(name, innerObject, siteConfig, logConfig, manager);
functionAppKeyServiceHost = manager.environment().getResourceManagerEndpoint();
functionAppKeyService = RestProxy.create(FunctionAppKeyService.class, manager.httpPipeline());
if (!isInCreateMode()) {
initializeFunctionService();
}
}
private void initializeFunctionService() {
if (functionService == null) {
UrlBuilder urlBuilder = UrlBuilder.parse(this.defaultHostname());
String baseUrl;
if (urlBuilder.getScheme() == null) {
urlBuilder.setScheme("https");
}
try {
baseUrl = urlBuilder.toUrl().toString();
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalStateException(e));
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
for (int i = 0, count = manager().httpPipeline().getPolicyCount(); i < count; ++i) {
HttpPipelinePolicy policy = manager().httpPipeline().getPolicy(i);
if (!(policy instanceof AuthenticationPolicy)
&& !(policy instanceof ProviderRegistrationPolicy)
&& !(policy instanceof AuxiliaryAuthenticationPolicy)) {
policies.add(policy);
}
}
policies.add(new FunctionAuthenticationPolicy(this));
HttpPipeline httpPipeline = new HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(manager().httpPipeline().getHttpClient())
.build();
functionServiceHost = baseUrl;
functionService =
RestProxy.create(FunctionService.class, httpPipeline, new AzureJacksonAdapter());
}
}
@Override
public void setInner(SiteInner innerObject) {
super.setInner(innerObject);
}
@Override
public FunctionDeploymentSlots deploymentSlots() {
if (deploymentSlots == null) {
deploymentSlots = new FunctionDeploymentSlotsImpl(this);
}
return deploymentSlots;
}
@Override
public FunctionAppImpl withNewConsumptionPlan() {
return withNewAppServicePlan(OperatingSystem.WINDOWS, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewConsumptionPlan(String appServicePlanName) {
return withNewAppServicePlan(
appServicePlanName, OperatingSystem.WINDOWS, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withRuntime(String runtime) {
return withAppSetting(SETTING_FUNCTIONS_WORKER_RUNTIME, runtime);
}
@Override
public FunctionAppImpl withRuntimeVersion(String version) {
return withAppSetting(SETTING_FUNCTIONS_EXTENSION_VERSION, version.startsWith("~") ? version : "~" + version);
}
@Override
public FunctionAppImpl withLatestRuntimeVersion() {
return withRuntimeVersion("latest");
}
@Override
Mono<Indexable> submitAppSettings() {
if (storageAccountCreatable != null && this.taskResult(storageAccountCreatable.key()) != null) {
storageAccountToSet = this.taskResult(storageAccountCreatable.key());
}
if (storageAccountToSet == null) {
return super.submitAppSettings();
} else {
return Flux
.concat(
storageAccountToSet
.getKeysAsync()
.map(storageAccountKeys -> storageAccountKeys.get(0))
.zipWith(
this.manager().appServicePlans().getByIdAsync(this.appServicePlanId()),
(StorageAccountKey storageAccountKey, AppServicePlan appServicePlan) -> {
String connectionString = com.azure.resourcemanager.resources.fluentcore.utils.Utils
.getStorageConnectionString(storageAccountToSet.name(), storageAccountKey.value(),
manager().environment());
addAppSettingIfNotModified(SETTING_WEB_JOBS_STORAGE, connectionString);
addAppSettingIfNotModified(SETTING_WEB_JOBS_DASHBOARD, connectionString);
if (OperatingSystem.WINDOWS.equals(operatingSystem())
&&
(appServicePlan == null
|| isConsumptionOrPremiumAppServicePlan(appServicePlan.pricingTier()))) {
addAppSettingIfNotModified(
SETTING_WEBSITE_CONTENTAZUREFILECONNECTIONSTRING, connectionString);
addAppSettingIfNotModified(
SETTING_WEBSITE_CONTENTSHARE,
this.manager().sdkContext().randomResourceName(name(), 32));
}
return FunctionAppImpl.super.submitAppSettings();
}))
.last()
.then(
Mono
.fromCallable(
() -> {
currentStorageAccount = storageAccountToSet;
storageAccountToSet = null;
storageAccountCreatable = null;
return this;
}));
}
}
@Override
public OperatingSystem operatingSystem() {
return (inner().reserved() == null || !inner().reserved()) ? OperatingSystem.WINDOWS : OperatingSystem.LINUX;
}
private void addAppSettingIfNotModified(String key, String value) {
if (!appSettingModified(key)) {
withAppSetting(key, value);
}
}
private boolean appSettingModified(String key) {
return (appSettingsToAdd != null && appSettingsToAdd.containsKey(key))
|| (appSettingsToRemove != null && appSettingsToRemove.contains(key));
}
private static boolean isConsumptionOrPremiumAppServicePlan(PricingTier pricingTier) {
if (pricingTier == null || pricingTier.toSkuDescription() == null) {
return true;
}
SkuDescription description = pricingTier.toSkuDescription();
return SkuName.DYNAMIC.toString().equalsIgnoreCase(description.tier())
|| SkuName.ELASTIC_PREMIUM.toString().equalsIgnoreCase(description.tier());
}
@Override
FunctionAppImpl withNewAppServicePlan(OperatingSystem operatingSystem, PricingTier pricingTier) {
return super.withNewAppServicePlan(operatingSystem, pricingTier).autoSetAlwaysOn(pricingTier);
}
@Override
FunctionAppImpl withNewAppServicePlan(
String appServicePlan, OperatingSystem operatingSystem, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlan, operatingSystem, pricingTier).autoSetAlwaysOn(pricingTier);
}
@Override
public FunctionAppImpl withExistingAppServicePlan(AppServicePlan appServicePlan) {
super.withExistingAppServicePlan(appServicePlan);
return autoSetAlwaysOn(appServicePlan.pricingTier());
}
private FunctionAppImpl autoSetAlwaysOn(PricingTier pricingTier) {
SkuDescription description = pricingTier.toSkuDescription();
if (description.tier().equalsIgnoreCase(SkuName.BASIC.toString())
|| description.tier().equalsIgnoreCase(SkuName.STANDARD.toString())
|| description.tier().equalsIgnoreCase(SkuName.PREMIUM.toString())
|| description.tier().equalsIgnoreCase(SkuName.PREMIUM_V2.toString())) {
return withWebAppAlwaysOn(true);
} else {
return withWebAppAlwaysOn(false);
}
}
@Override
public FunctionAppImpl withNewStorageAccount(String name, StorageAccountSkuType sku) {
StorageAccount.DefinitionStages.WithGroup storageDefine =
manager().storageManager().storageAccounts().define(name).withRegion(regionName());
if (super.creatableGroup != null && isInCreateMode()) {
storageAccountCreatable =
storageDefine.withNewResourceGroup(super.creatableGroup).withGeneralPurposeAccountKind().withSku(sku);
} else {
storageAccountCreatable =
storageDefine
.withExistingResourceGroup(resourceGroupName())
.withGeneralPurposeAccountKind()
.withSku(sku);
}
this.addDependency(storageAccountCreatable);
return this;
}
@Override
public FunctionAppImpl withNewStorageAccount(Creatable<StorageAccount> storageAccount) {
storageAccountCreatable = storageAccount;
this.addDependency(storageAccountCreatable);
return this;
}
@Override
public FunctionAppImpl withExistingStorageAccount(StorageAccount storageAccount) {
this.storageAccountToSet = storageAccount;
return this;
}
@Override
public FunctionAppImpl withDailyUsageQuota(int quota) {
inner().withDailyMemoryTimeQuota(quota);
return this;
}
@Override
public FunctionAppImpl withoutDailyUsageQuota() {
return withDailyUsageQuota(0);
}
@Override
public FunctionAppImpl withNewLinuxConsumptionPlan() {
return withNewAppServicePlan(OperatingSystem.LINUX, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewLinuxConsumptionPlan(String appServicePlanName) {
return withNewAppServicePlan(
appServicePlanName, OperatingSystem.LINUX, new PricingTier(SkuName.DYNAMIC.toString(), "Y1"));
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(PricingTier pricingTier) {
return super.withNewAppServicePlan(OperatingSystem.LINUX, pricingTier);
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(String appServicePlanName, PricingTier pricingTier) {
return super.withNewAppServicePlan(appServicePlanName, OperatingSystem.LINUX, pricingTier);
}
@Override
public FunctionAppImpl withNewLinuxAppServicePlan(Creatable<AppServicePlan> appServicePlanCreatable) {
super.withNewAppServicePlan(appServicePlanCreatable);
if (appServicePlanCreatable instanceof AppServicePlan) {
this.autoSetAlwaysOn(((AppServicePlan) appServicePlanCreatable).pricingTier());
}
return this;
}
@Override
public FunctionAppImpl withExistingLinuxAppServicePlan(AppServicePlan appServicePlan) {
return super.withExistingAppServicePlan(appServicePlan).autoSetAlwaysOn(appServicePlan.pricingTier());
}
@Override
public FunctionAppImpl withBuiltInImage(final FunctionRuntimeStack runtimeStack) {
ensureLinuxPlan();
cleanUpContainerSettings();
if (siteConfig == null) {
siteConfig = new SiteConfigResourceInner();
}
withRuntime(runtimeStack.runtime());
withRuntimeVersion(runtimeStack.version());
siteConfig.withLinuxFxVersion(runtimeStack.getLinuxFxVersion());
return this;
}
@Override
public FunctionAppImpl withPublicDockerHubImage(String imageAndTag) {
ensureLinuxPlan();
return super.withPublicDockerHubImage(imageAndTag);
}
@Override
public FunctionAppImpl withPrivateDockerHubImage(String imageAndTag) {
ensureLinuxPlan();
return super.withPublicDockerHubImage(imageAndTag);
}
@Override
public FunctionAppImpl withPrivateRegistryImage(String imageAndTag, String serverUrl) {
ensureLinuxPlan();
return super.withPrivateRegistryImage(imageAndTag, serverUrl);
}
@Override
protected void cleanUpContainerSettings() {
if (siteConfig != null && siteConfig.linuxFxVersion() != null) {
siteConfig.withLinuxFxVersion(null);
}
withoutAppSetting(SETTING_DOCKER_IMAGE);
withoutAppSetting(SETTING_REGISTRY_SERVER);
withoutAppSetting(SETTING_REGISTRY_USERNAME);
withoutAppSetting(SETTING_REGISTRY_PASSWORD);
}
@Override
protected OperatingSystem appServicePlanOperatingSystem(AppServicePlan appServicePlan) {
return (appServicePlan.inner().reserved() == null || !appServicePlan.inner().reserved())
? OperatingSystem.WINDOWS
: OperatingSystem.LINUX;
}
@Override
public StorageAccount storageAccount() {
return currentStorageAccount;
}
@Override
public String getMasterKey() {
return getMasterKeyAsync().block();
}
@Override
public Mono<String> getMasterKeyAsync() {
return FluxUtil
.withContext(
context ->
functionAppKeyService
.listKeys(
functionAppKeyServiceHost,
resourceGroupName(),
name(),
manager().subscriptionId(),
"2019-08-01"))
.map(ListKeysResult::getMasterKey)
.subscriberContext(
context -> context.putAll(FluxUtil.toReactorContext(this.manager().inner().getContext())));
}
@Override
public PagedIterable<FunctionEnvelope> listFunctions() {
return this.manager().functionApps().listFunctions(resourceGroupName(), name());
}
@Override
public Map<String, String> listFunctionKeys(String functionName) {
return listFunctionKeysAsync(functionName).block();
}
@Override
public Mono<Map<String, String>> listFunctionKeysAsync(final String functionName) {
return functionService
.listFunctionKeys(functionServiceHost, functionName)
.map(
result -> {
Map<String, String> keys = new HashMap<>();
if (result.keys != null) {
for (NameValuePair pair : result.keys) {
keys.put(pair.name(), pair.value());
}
}
return keys;
});
}
@Override
public NameValuePair addFunctionKey(String functionName, String keyName, String keyValue) {
return addFunctionKeyAsync(functionName, keyName, keyValue).block();
}
@Override
public Mono<NameValuePair> addFunctionKeyAsync(String functionName, String keyName, String keyValue) {
if (keyValue != null) {
return functionService
.addFunctionKey(
functionServiceHost,
functionName,
keyName,
new NameValuePair().withName(keyName).withValue(keyValue));
} else {
return functionService.generateFunctionKey(functionServiceHost, functionName, keyName);
}
}
@Override
public void removeFunctionKey(String functionName, String keyName) {
removeFunctionKeyAsync(functionName, keyName).block();
}
@Override
public Mono<Void> removeFunctionKeyAsync(String functionName, String keyName) {
return functionService.deleteFunctionKey(functionServiceHost, functionName, keyName);
}
@Override
public void triggerFunction(String functionName, Object payload) {
triggerFunctionAsync(functionName, payload).block();
}
@Override
public Mono<Void> triggerFunctionAsync(String functionName, Object payload) {
return functionService.triggerFunction(functionServiceHost, functionName, payload);
}
@Override
public void syncTriggers() {
syncTriggersAsync().block();
}
@Override
public Mono<Void> syncTriggersAsync() {
return manager()
.inner()
.getWebApps()
.syncFunctionTriggersAsync(resourceGroupName(), name())
.onErrorResume(
throwable -> {
if (throwable instanceof ManagementException
&& ((ManagementException) throwable).getResponse().getStatusCode() == 200) {
return Mono.empty();
} else {
return Mono.error(throwable);
}
});
}
@Override
public Flux<String> streamApplicationLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamApplicationLogsAsync());
}
@Override
public Flux<String> streamHttpLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamHttpLogsAsync());
}
@Override
public Flux<String> streamTraceLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamTraceLogsAsync());
}
@Override
public Flux<String> streamDeploymentLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamDeploymentLogsAsync());
}
@Override
public Flux<String> streamAllLogsAsync() {
return functionService
.ping(functionServiceHost)
.then(functionService.getHostStatus(functionServiceHost))
.thenMany(FunctionAppImpl.super.streamAllLogsAsync());
}
@Override
public Mono<Void> zipDeployAsync(File zipFile) {
try {
return kuduClient.zipDeployAsync(zipFile);
} catch (IOException e) {
return Mono.error(e);
}
}
@Override
public void zipDeploy(File zipFile) {
zipDeployAsync(zipFile).block();
}
@Override
public Mono<Void> zipDeployAsync(InputStream zipFile) {
return kuduClient.zipDeployAsync(zipFile);
}
@Override
public void zipDeploy(InputStream zipFile) {
zipDeployAsync(zipFile).block();
}
    @Override
public Mono<Void> afterPostRunAsync(final boolean isGroupFaulted) {
if (!isGroupFaulted) {
initializeFunctionService();
}
return super.afterPostRunAsync(isGroupFaulted);
}
private static class ListKeysResult {
@JsonProperty("masterKey")
private String masterKey;
@JsonProperty("functionKeys")
private Map<String, String> functionKeys;
@JsonProperty("systemKeys")
private Map<String, String> systemKeys;
public String getMasterKey() {
return masterKey;
}
}
@Host("{$host}")
@ServiceInterface(name = "FunctionAppKeyService")
private interface FunctionAppKeyService {
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps listKeys"
})
@Post(
"subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}"
+ "/host/default/listkeys")
Mono<ListKeysResult> listKeys(
@HostParam("$host") String host,
@PathParam("resourceGroupName") String resourceGroupName,
@PathParam("name") String name,
@PathParam("subscriptionId") String subscriptionId,
@QueryParam("api-version") String apiVersion);
}
@Host("{$host}")
@ServiceInterface(name = "FunctionService")
private interface FunctionService {
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps listFunctionKeys"
})
@Get("admin/functions/{name}/keys")
Mono<FunctionKeyListResult> listFunctionKeys(
@HostParam("$host") String host, @PathParam("name") String functionName);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps addFunctionKey"
})
@Put("admin/functions/{name}/keys/{keyName}")
Mono<NameValuePair> addFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName,
@BodyParam("application/json") NameValuePair key);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps generateFunctionKey"
})
@Post("admin/functions/{name}/keys/{keyName}")
Mono<NameValuePair> generateFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps deleteFunctionKey"
})
@Delete("admin/functions/{name}/keys/{keyName}")
Mono<Void> deleteFunctionKey(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@PathParam("keyName") String keyName);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps ping"
})
@Post("admin/host/ping")
Mono<Void> ping(@HostParam("$host") String host);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps getHostStatus"
})
@Get("admin/host/status")
Mono<Void> getHostStatus(@HostParam("$host") String host);
@Headers({
"Content-Type: application/json; charset=utf-8",
"x-ms-logging-context: com.microsoft.azure.management.appservice.WebApps triggerFunction"
})
@Post("admin/functions/{name}")
Mono<Void> triggerFunction(
@HostParam("$host") String host,
@PathParam("name") String functionName,
@BodyParam("application/json") Object payload);
}
private static class FunctionKeyListResult {
@JsonProperty("keys")
private List<NameValuePair> keys;
}
/*
private static final class FunctionCredential implements TokenCredential {
private final FunctionAppImpl functionApp;
private FunctionCredential(FunctionAppImpl functionApp) {
this.functionApp = functionApp;
}
@Override
public Mono<AccessToken> getToken(TokenRequestContext request) {
return functionApp.manager().inner().getWebApps()
.getFunctionsAdminTokenAsync(functionApp.resourceGroupName(), functionApp.name())
.map(token -> {
String jwt = new String(Base64.getUrlDecoder().decode(token.split("\\.")[1]));
Pattern pattern = Pattern.compile("\"exp\": *([0-9]+),");
Matcher matcher = pattern.matcher(jwt);
matcher.find();
long expire = Long.parseLong(matcher.group(1));
return new AccessToken(token, OffsetDateTime.ofInstant(
Instant.ofEpochMilli(expire), ZoneOffset.UTC));
});
}
}
*/
}
|
Yes, you'd of course have to wrap the subList in a DSeries again, requiring a new constructor.
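A minimal sketch of what that new constructor could look like, assuming DSeries is backed by a List<DMetric> and that wrapping the subList view without copying is acceptable; apart from DSeries, DMetric, and getSeries(), which appear in this diff, the names are illustrative:

import java.util.ArrayList;
import java.util.List;

public class DSeries {
    private final List<DMetric> series;

    public DSeries() {
        this(new ArrayList<>());
    }

    // Hypothetical new constructor: wraps an existing list, e.g. the
    // subList view produced by the chunking loop, without copying it.
    public DSeries(List<DMetric> series) {
        this.series = series;
    }

    public List<DMetric> getSeries() {
        return series;
    }
}

With this in place, the chunking loop can build each request as new DSeries(request.getSeries().subList(fromIndex, toIndex)), exactly as the updated report() does.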
|
public void open(MetricConfig config) {
String apiKey = config.getString(API_KEY, null);
String proxyHost = config.getString(PROXY_HOST, null);
Integer proxyPort = config.getInteger(PROXY_PORT, 8080);
String rawDataCenter = config.getString(DATA_CENTER, "US");
maxMetricsPerRequestValue = config.getInteger(MAX_METRICS_PER_REQUEST, 2000);
DataCenter dataCenter = DataCenter.valueOf(rawDataCenter);
String tags = config.getString(TAGS, "");
client = new DatadogHttpClient(apiKey, proxyHost, proxyPort, dataCenter, true);
configTags = getTagsFromConfig(tags);
LOGGER.info("Configured DatadogHttpReporter with {tags={}, proxyHost={}, proxyPort={}, dataCenter={}, maxMetricsPerRequest={}", tags, proxyHost, proxyPort, dataCenter, maxMetricsPerRequestValue);
}
@Override
public void close() {
client.close();
LOGGER.info("Shut down DatadogHttpReporter");
}
@Override
public void report() {
try {
int totalGaugesSent = addGaugesAndUnregisterOnException();
DSeries request = new DSeries();
int currentCount = 0;
for (DCounter c : counters.values()) {
request.addCounter(c);
++currentCount;
if (currentCount % maxMetricsPerRequestValue == 0) {
client.send(request);
request = new DSeries();
}
}
for (DMeter m : meters.values()) {
request.addMeter(m);
++currentCount;
if (currentCount % maxMetricsPerRequestValue == 0) {
client.send(request);
request = new DSeries();
}
}
if (request.getSeries().size() > 0) {
client.send(request);
}
counters.values().forEach(DCounter::ackReport);
LOGGER.debug("Reported series with size {}.", totalGaugesSent + currentCount);
} catch (SocketTimeoutException e) {
LOGGER.warn("Failed reporting metrics to Datadog because of socket timeout: {}", e.getMessage());
} catch (Exception e) {
LOGGER.warn("Failed reporting metrics to Datadog.", e);
}
}
private int addGaugesAndUnregisterOnException() {
DSeries request = new DSeries();
int currentCount = 0;
int totalGauges = gauges.size();
List<Gauge> gaugesToRemove = new ArrayList<>();
for (Map.Entry<Gauge, DGauge> entry : gauges.entrySet()) {
DGauge g = entry.getValue();
try {
g.getMetricValue();
request.addGauge(g);
++currentCount;
if (currentCount % maxMetricsPerRequestValue == 0 || currentCount >= totalGauges) {
client.send(request);
request = new DSeries();
}
} catch (ClassCastException e) {
LOGGER.info("The metric {} will not be reported because only number types are supported by this reporter.", g.getMetric());
gaugesToRemove.add(entry.getKey());
} catch (Exception e) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("The metric {} will not be reported because it threw an exception.", g.getMetric(), e);
} else {
LOGGER.info("The metric {} will not be reported because it threw an exception.", g.getMetric());
}
gaugesToRemove.add(entry.getKey());
}
}
gaugesToRemove.forEach(gauges::remove);
return currentCount;
}
/**
* Get config tags from config 'metrics.reporter.dghttp.tags'.
*/
private List<String> getTagsFromConfig(String str) {
return Arrays.asList(str.split(","));
}
/**
* Get tags from MetricGroup
*/
private List<String> getTagsFromMetricGroup(MetricGroup metricGroup) {
List<String> tags = new ArrayList<>();
for (Map.Entry<String, String> entry: metricGroup.getAllVariables().entrySet()) {
if (!entry.getKey().equals(HOST_VARIABLE)) {
tags.add(getVariableName(entry.getKey()) + ":" + entry.getValue());
}
}
return tags;
}
private String getHostFromMetricGroup(MetricGroup metricGroup) {
return metricGroup.getAllVariables().get(HOST_VARIABLE);
}
/**
* Removes leading and trailing angle brackets.
*/
private String getVariableName(String str) {
return str.substring(1, str.length() - 1);
}
}
|
client.send(request);
|
public void open(MetricConfig config) {
String apiKey = config.getString(API_KEY, null);
String proxyHost = config.getString(PROXY_HOST, null);
Integer proxyPort = config.getInteger(PROXY_PORT, 8080);
String rawDataCenter = config.getString(DATA_CENTER, "US");
maxMetricsPerRequestValue = config.getInteger(MAX_METRICS_PER_REQUEST, 2000);
DataCenter dataCenter = DataCenter.valueOf(rawDataCenter);
String tags = config.getString(TAGS, "");
client = new DatadogHttpClient(apiKey, proxyHost, proxyPort, dataCenter, true);
configTags = getTagsFromConfig(tags);
LOGGER.info("Configured DatadogHttpReporter with {tags={}, proxyHost={}, proxyPort={}, dataCenter={}, maxMetricsPerRequest={}", tags, proxyHost, proxyPort, dataCenter, maxMetricsPerRequestValue);
}
@Override
public void close() {
client.close();
LOGGER.info("Shut down DatadogHttpReporter");
}
@Override
public void report() {
DSeries request = new DSeries();
addGaugesAndUnregisterOnException(request);
counters.values().forEach(request::addCounter);
meters.values().forEach(request::addMeter);
int totalMetrics = request.getSeries().size();
int fromIndex = 0;
while (fromIndex < totalMetrics) {
int toIndex = Math.min(fromIndex + maxMetricsPerRequestValue, totalMetrics);
try {
DSeries chunk = new DSeries(request.getSeries().subList(fromIndex, toIndex));
client.send(chunk);
chunk.getSeries().forEach(DMetric::ackReport);
LOGGER.debug("Reported series with size {}.", chunk.getSeries().size());
} catch (SocketTimeoutException e) {
LOGGER.warn("Failed reporting metrics to Datadog because of socket timeout: {}", e.getMessage());
} catch (Exception e) {
LOGGER.warn("Failed reporting metrics to Datadog.", e);
}
fromIndex = toIndex;
}
}
private void addGaugesAndUnregisterOnException(DSeries request) {
List<Gauge> gaugesToRemove = new ArrayList<>();
for (Map.Entry<Gauge, DGauge> entry : gauges.entrySet()) {
DGauge g = entry.getValue();
try {
g.getMetricValue();
request.addGauge(g);
} catch (ClassCastException e) {
LOGGER.info("The metric {} will not be reported because only number types are supported by this reporter.", g.getMetric());
gaugesToRemove.add(entry.getKey());
} catch (Exception e) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("The metric {} will not be reported because it threw an exception.", g.getMetric(), e);
} else {
LOGGER.info("The metric {} will not be reported because it threw an exception.", g.getMetric());
}
gaugesToRemove.add(entry.getKey());
}
}
gaugesToRemove.forEach(gauges::remove);
}
/**
* Get config tags from config 'metrics.reporter.dghttp.tags'.
*/
private List<String> getTagsFromConfig(String str) {
return Arrays.asList(str.split(","));
}
/**
* Get tags from MetricGroup
*/
private List<String> getTagsFromMetricGroup(MetricGroup metricGroup) {
List<String> tags = new ArrayList<>();
for (Map.Entry<String, String> entry: metricGroup.getAllVariables().entrySet()) {
if (!entry.getKey().equals(HOST_VARIABLE)) {
tags.add(getVariableName(entry.getKey()) + ":" + entry.getValue());
}
}
return tags;
}
private String getHostFromMetricGroup(MetricGroup metricGroup) {
return metricGroup.getAllVariables().get(HOST_VARIABLE);
}
/**
* Removes leading and trailing angle brackets.
*/
private String getVariableName(String str) {
return str.substring(1, str.length() - 1);
}
}
|
class DatadogHttpReporter implements MetricReporter, Scheduled {
private static final Logger LOGGER = LoggerFactory.getLogger(DatadogHttpReporter.class);
private static final String HOST_VARIABLE = "<host>";
private final Map<Gauge, DGauge> gauges = new ConcurrentHashMap<>();
private final Map<Counter, DCounter> counters = new ConcurrentHashMap<>();
private final Map<Meter, DMeter> meters = new ConcurrentHashMap<>();
private DatadogHttpClient client;
private List<String> configTags;
private int maxMetricsPerRequestValue;
private final Clock clock = () -> System.currentTimeMillis() / 1000L;
public static final String API_KEY = "apikey";
public static final String PROXY_HOST = "proxyHost";
public static final String PROXY_PORT = "proxyPort";
public static final String DATA_CENTER = "dataCenter";
public static final String TAGS = "tags";
public static final String MAX_METRICS_PER_REQUEST = "maxMetricsPerRequest";
@Override
public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {
final String name = group.getMetricIdentifier(metricName);
List<String> tags = new ArrayList<>(configTags);
tags.addAll(getTagsFromMetricGroup(group));
String host = getHostFromMetricGroup(group);
if (metric instanceof Counter) {
Counter c = (Counter) metric;
counters.put(c, new DCounter(c, name, host, tags, clock));
} else if (metric instanceof Gauge) {
Gauge g = (Gauge) metric;
gauges.put(g, new DGauge(g, name, host, tags, clock));
} else if (metric instanceof Meter) {
Meter m = (Meter) metric;
meters.put(m, new DMeter(m, name, host, tags, clock));
} else if (metric instanceof Histogram) {
LOGGER.warn("Cannot add {} because Datadog HTTP API doesn't support Histogram", metricName);
} else {
LOGGER.warn("Cannot add unknown metric type {}. This indicates that the reporter " +
"does not support this metric type.", metric.getClass().getName());
}
}
@Override
public void notifyOfRemovedMetric(Metric metric, String metricName, MetricGroup group) {
if (metric instanceof Counter) {
counters.remove(metric);
} else if (metric instanceof Gauge) {
gauges.remove(metric);
} else if (metric instanceof Meter) {
meters.remove(metric);
} else if (metric instanceof Histogram) {
} else {
LOGGER.warn("Cannot remove unknown metric type {}. This indicates that the reporter " +
"does not support this metric type.", metric.getClass().getName());
}
}
@Override
|
class DatadogHttpReporter implements MetricReporter, Scheduled {
private static final Logger LOGGER = LoggerFactory.getLogger(DatadogHttpReporter.class);
private static final String HOST_VARIABLE = "<host>";
private final Map<Gauge, DGauge> gauges = new ConcurrentHashMap<>();
private final Map<Counter, DCounter> counters = new ConcurrentHashMap<>();
private final Map<Meter, DMeter> meters = new ConcurrentHashMap<>();
private DatadogHttpClient client;
private List<String> configTags;
private int maxMetricsPerRequestValue;
private final Clock clock = () -> System.currentTimeMillis() / 1000L;
public static final String API_KEY = "apikey";
public static final String PROXY_HOST = "proxyHost";
public static final String PROXY_PORT = "proxyPort";
public static final String DATA_CENTER = "dataCenter";
public static final String TAGS = "tags";
public static final String MAX_METRICS_PER_REQUEST = "maxMetricsPerRequest";
@Override
public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {
final String name = group.getMetricIdentifier(metricName);
List<String> tags = new ArrayList<>(configTags);
tags.addAll(getTagsFromMetricGroup(group));
String host = getHostFromMetricGroup(group);
if (metric instanceof Counter) {
Counter c = (Counter) metric;
counters.put(c, new DCounter(c, name, host, tags, clock));
} else if (metric instanceof Gauge) {
Gauge g = (Gauge) metric;
gauges.put(g, new DGauge(g, name, host, tags, clock));
} else if (metric instanceof Meter) {
Meter m = (Meter) metric;
meters.put(m, new DMeter(m, name, host, tags, clock));
} else if (metric instanceof Histogram) {
LOGGER.warn("Cannot add {} because Datadog HTTP API doesn't support Histogram", metricName);
} else {
LOGGER.warn("Cannot add unknown metric type {}. This indicates that the reporter " +
"does not support this metric type.", metric.getClass().getName());
}
}
@Override
public void notifyOfRemovedMetric(Metric metric, String metricName, MetricGroup group) {
if (metric instanceof Counter) {
counters.remove(metric);
} else if (metric instanceof Gauge) {
gauges.remove(metric);
} else if (metric instanceof Meter) {
meters.remove(metric);
} else if (metric instanceof Histogram) {
} else {
LOGGER.warn("Cannot remove unknown metric type {}. This indicates that the reporter " +
"does not support this metric type.", metric.getClass().getName());
}
}
@Override
|
Aren't we implementing record-record intersection here? If so, shouldn't we change the check in L2808 to just `targetType.tag != TypeTags.MAP`? @dulvinw is probably implementing record-record intersection though.
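A hedged sketch of the suggested simplification; the check at the referenced line is not part of this diff, so the original condition below is a hypothetical reconstruction:

// Hypothetical current form, rejecting everything except maps and records:
if (targetType.tag != TypeTags.MAP && targetType.tag != TypeTags.RECORD) {
    return false;
}
// Suggested form, once record-record intersection is handled separately:
if (targetType.tag != TypeTags.MAP) {
    return false;
}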
|
public Set<BType> expandAndGetMemberTypesRecursive(BType bType) {
Set<BType> memberTypes = new LinkedHashSet<>();
switch (bType.tag) {
case TypeTags.BYTE:
case TypeTags.INT:
memberTypes.add(symTable.intType);
memberTypes.add(symTable.byteType);
break;
case TypeTags.FINITE:
BFiniteType expType = (BFiniteType) bType;
expType.getValueSpace().forEach(value -> {
memberTypes.add(value.type);
});
break;
case TypeTags.UNION:
BUnionType unionType = (BUnionType) bType;
unionType.getMemberTypes().forEach(member -> {
memberTypes.addAll(expandAndGetMemberTypesRecursive(member));
});
break;
case TypeTags.ARRAY:
BType arrayElementType = ((BArrayType) bType).getElementType();
if (((BArrayType) bType).getSize() != -1) {
memberTypes.add(new BArrayType(arrayElementType));
}
if (arrayElementType.tag == TypeTags.UNION) {
Set<BType> elementUnionTypes = expandAndGetMemberTypesRecursive(arrayElementType);
elementUnionTypes.forEach(elementUnionType -> {
memberTypes.add(new BArrayType(elementUnionType));
});
}
memberTypes.add(bType);
break;
case TypeTags.MAP:
BType mapConstraintType = ((BMapType) bType).getConstraint();
if (mapConstraintType.tag == TypeTags.UNION) {
Set<BType> constraintUnionTypes = expandAndGetMemberTypesRecursive(mapConstraintType);
constraintUnionTypes.forEach(constraintUnionType -> {
memberTypes.add(new BMapType(TypeTags.MAP, constraintUnionType, symTable.mapType.tsymbol));
});
}
memberTypes.add(bType);
break;
default:
memberTypes.add(bType);
}
return memberTypes;
}
|
switch (bType.tag) {
|
public Set<BType> expandAndGetMemberTypesRecursive(BType bType) {
Set<BType> memberTypes = new LinkedHashSet<>();
switch (bType.tag) {
case TypeTags.BYTE:
case TypeTags.INT:
memberTypes.add(symTable.intType);
memberTypes.add(symTable.byteType);
break;
case TypeTags.FINITE:
BFiniteType expType = (BFiniteType) bType;
expType.getValueSpace().forEach(value -> {
memberTypes.add(value.type);
});
break;
case TypeTags.UNION:
BUnionType unionType = (BUnionType) bType;
unionType.getMemberTypes().forEach(member -> {
memberTypes.addAll(expandAndGetMemberTypesRecursive(member));
});
break;
case TypeTags.ARRAY:
BType arrayElementType = ((BArrayType) bType).getElementType();
if (((BArrayType) bType).getSize() != -1) {
memberTypes.add(new BArrayType(arrayElementType));
}
if (arrayElementType.tag == TypeTags.UNION) {
Set<BType> elementUnionTypes = expandAndGetMemberTypesRecursive(arrayElementType);
elementUnionTypes.forEach(elementUnionType -> {
memberTypes.add(new BArrayType(elementUnionType));
});
}
memberTypes.add(bType);
break;
case TypeTags.MAP:
BType mapConstraintType = ((BMapType) bType).getConstraint();
if (mapConstraintType.tag == TypeTags.UNION) {
Set<BType> constraintUnionTypes = expandAndGetMemberTypesRecursive(mapConstraintType);
constraintUnionTypes.forEach(constraintUnionType -> {
memberTypes.add(new BMapType(TypeTags.MAP, constraintUnionType, symTable.mapType.tsymbol));
});
}
memberTypes.add(bType);
break;
default:
memberTypes.add(bType);
}
return memberTypes;
}
|
class BSameTypeVisitor implements BTypeVisitor<BType, Boolean> {
Set<TypePair> unresolvedTypes;
BSameTypeVisitor(Set<TypePair> unresolvedTypes) {
this.unresolvedTypes = unresolvedTypes;
}
@Override
public Boolean visit(BType t, BType s) {
if (t == s) {
return true;
}
switch (t.tag) {
case TypeTags.INT:
case TypeTags.BYTE:
case TypeTags.FLOAT:
case TypeTags.DECIMAL:
case TypeTags.STRING:
case TypeTags.BOOLEAN:
return t.tag == s.tag
&& (TypeParamAnalyzer.isTypeParam(t) || TypeParamAnalyzer.isTypeParam(s));
case TypeTags.ANY:
case TypeTags.ANYDATA:
return t.tag == s.tag && hasSameReadonlyFlag(s, t)
&& (TypeParamAnalyzer.isTypeParam(t) || TypeParamAnalyzer.isTypeParam(s));
default:
break;
}
return false;
}
@Override
public Boolean visit(BBuiltInRefType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BAnyType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BAnydataType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BMapType t, BType s) {
if (s.tag != TypeTags.MAP || !hasSameReadonlyFlag(s, t)) {
return false;
}
BMapType sType = ((BMapType) s);
return isSameType(sType.constraint, t.constraint, this.unresolvedTypes);
}
@Override
public Boolean visit(BFutureType t, BType s) {
return s.tag == TypeTags.FUTURE && t.constraint.tag == ((BFutureType) s).constraint.tag;
}
@Override
public Boolean visit(BXMLType t, BType s) {
return visit((BBuiltInRefType) t, s);
}
@Override
public Boolean visit(BJSONType t, BType s) {
return s.tag == TypeTags.JSON && hasSameReadonlyFlag(s, t);
}
@Override
public Boolean visit(BArrayType t, BType s) {
return s.tag == TypeTags.ARRAY && hasSameReadonlyFlag(s, t) && isSameArrayType(s, t, this.unresolvedTypes);
}
@Override
public Boolean visit(BObjectType t, BType s) {
if (t == s) {
return true;
}
if (s.tag != TypeTags.OBJECT) {
return false;
}
return t.tsymbol.pkgID.equals(s.tsymbol.pkgID) && t.tsymbol.name.equals(s.tsymbol.name);
}
@Override
public Boolean visit(BRecordType t, BType s) {
if (t == s) {
return true;
}
if (s.tag != TypeTags.RECORD || !hasSameReadonlyFlag(s, t)) {
return false;
}
BRecordType source = (BRecordType) s;
if (source.fields.size() != t.fields.size()) {
return false;
}
for (BField sourceField : source.fields.values()) {
if (t.fields.containsKey(sourceField.name.value)) {
BField targetField = t.fields.get(sourceField.name.value);
if (isSameType(sourceField.type, targetField.type, this.unresolvedTypes) &&
hasSameOptionalFlag(sourceField.symbol, targetField.symbol) &&
(!Symbols.isFlagOn(targetField.symbol.flags, Flags.READONLY) ||
Symbols.isFlagOn(sourceField.symbol.flags, Flags.READONLY))) {
continue;
}
}
return false;
}
return isSameType(source.restFieldType, t.restFieldType, this.unresolvedTypes);
}
private boolean hasSameOptionalFlag(BVarSymbol s, BVarSymbol t) {
return ((s.flags & Flags.OPTIONAL) ^ (t.flags & Flags.OPTIONAL)) != Flags.OPTIONAL;
}
private boolean hasSameReadonlyFlag(BType source, BType target) {
return Symbols.isFlagOn(target.flags, Flags.READONLY) == Symbols.isFlagOn(source.flags, Flags.READONLY);
}
public Boolean visit(BTupleType t, BType s) {
if (s.tag != TypeTags.TUPLE || !hasSameReadonlyFlag(s, t)) {
return false;
}
BTupleType source = (BTupleType) s;
if (source.tupleTypes.size() != t.tupleTypes.size()) {
return false;
}
for (int i = 0; i < source.tupleTypes.size(); i++) {
if (t.getTupleTypes().get(i) == symTable.noType) {
continue;
}
if (!isSameType(source.getTupleTypes().get(i), t.tupleTypes.get(i), this.unresolvedTypes)) {
return false;
}
}
return true;
}
@Override
public Boolean visit(BStreamType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BTableType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BInvokableType t, BType s) {
return s.tag == TypeTags.INVOKABLE && isSameFunctionType((BInvokableType) s, t, this.unresolvedTypes);
}
@Override
public Boolean visit(BUnionType tUnionType, BType s) {
if (s.tag != TypeTags.UNION || !hasSameReadonlyFlag(s, tUnionType)) {
return false;
}
BUnionType sUnionType = (BUnionType) s;
if (sUnionType.getMemberTypes().size()
!= tUnionType.getMemberTypes().size()) {
return false;
}
Set<BType> sourceTypes = new LinkedHashSet<>(sUnionType.getMemberTypes());
Set<BType> targetTypes = new LinkedHashSet<>(tUnionType.getMemberTypes());
boolean notSameType = sourceTypes
.stream()
.map(sT -> targetTypes
.stream()
.anyMatch(it -> isSameType(it, sT, this.unresolvedTypes)))
.anyMatch(foundSameType -> !foundSameType);
return !notSameType;
}
@Override
public Boolean visit(BIntersectionType tIntersectionType, BType s) {
if (s.tag != TypeTags.INTERSECTION || !hasSameReadonlyFlag(s, tIntersectionType)) {
return false;
}
BIntersectionType sIntersectionType = (BIntersectionType) s;
if (sIntersectionType.getConstituentTypes().size() != tIntersectionType.getConstituentTypes().size()) {
return false;
}
Set<BType> sourceTypes = new LinkedHashSet<>(sIntersectionType.getConstituentTypes());
Set<BType> targetTypes = new LinkedHashSet<>(tIntersectionType.getConstituentTypes());
for (BType sourceType : sourceTypes) {
boolean foundSameType = false;
for (BType targetType : targetTypes) {
if (isSameType(sourceType, targetType, this.unresolvedTypes)) {
foundSameType = true;
break;
}
}
if (!foundSameType) {
return false;
}
}
return true;
}
@Override
public Boolean visit(BErrorType t, BType s) {
if (s.tag != TypeTags.ERROR) {
return false;
}
BErrorType source = (BErrorType) s;
if (!source.typeIdSet.equals(t.typeIdSet)) {
return false;
}
if (source.detailType == t.detailType) {
return true;
}
return isSameType(source.detailType, t.detailType, this.unresolvedTypes);
}
@Override
public Boolean visit(BServiceType t, BType s) {
return t == s || t.tag == s.tag;
}
@Override
public Boolean visit(BTypedescType t, BType s) {
if (s.tag != TypeTags.TYPEDESC) {
return false;
}
BTypedescType sType = ((BTypedescType) s);
return isSameType(sType.constraint, t.constraint, this.unresolvedTypes);
}
@Override
public Boolean visit(BFiniteType t, BType s) {
return s == t;
}
@Override
public Boolean visit(BParameterizedType t, BType s) {
if (s.tag != TypeTags.PARAMETERIZED_TYPE) {
return false;
}
BParameterizedType sType = (BParameterizedType) s;
return isSameType(sType.paramValueType, t.paramValueType) && sType.paramSymbol.equals(t.paramSymbol);
}
}
|
class BSameTypeVisitor implements BTypeVisitor<BType, Boolean> {
Set<TypePair> unresolvedTypes;
BSameTypeVisitor(Set<TypePair> unresolvedTypes) {
this.unresolvedTypes = unresolvedTypes;
}
@Override
public Boolean visit(BType t, BType s) {
if (t == s) {
return true;
}
switch (t.tag) {
case TypeTags.INT:
case TypeTags.BYTE:
case TypeTags.FLOAT:
case TypeTags.DECIMAL:
case TypeTags.STRING:
case TypeTags.BOOLEAN:
return t.tag == s.tag
&& (TypeParamAnalyzer.isTypeParam(t) || TypeParamAnalyzer.isTypeParam(s));
case TypeTags.ANY:
case TypeTags.ANYDATA:
return t.tag == s.tag && hasSameReadonlyFlag(s, t)
&& (TypeParamAnalyzer.isTypeParam(t) || TypeParamAnalyzer.isTypeParam(s));
default:
break;
}
return false;
}
@Override
public Boolean visit(BBuiltInRefType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BAnyType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BAnydataType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BMapType t, BType s) {
if (s.tag != TypeTags.MAP || !hasSameReadonlyFlag(s, t)) {
return false;
}
BMapType sType = ((BMapType) s);
return isSameType(sType.constraint, t.constraint, this.unresolvedTypes);
}
@Override
public Boolean visit(BFutureType t, BType s) {
return s.tag == TypeTags.FUTURE && t.constraint.tag == ((BFutureType) s).constraint.tag;
}
@Override
public Boolean visit(BXMLType t, BType s) {
return visit((BBuiltInRefType) t, s);
}
@Override
public Boolean visit(BJSONType t, BType s) {
return s.tag == TypeTags.JSON && hasSameReadonlyFlag(s, t);
}
@Override
public Boolean visit(BArrayType t, BType s) {
return s.tag == TypeTags.ARRAY && hasSameReadonlyFlag(s, t) && isSameArrayType(s, t, this.unresolvedTypes);
}
@Override
public Boolean visit(BObjectType t, BType s) {
if (t == s) {
return true;
}
if (s.tag != TypeTags.OBJECT) {
return false;
}
return t.tsymbol.pkgID.equals(s.tsymbol.pkgID) && t.tsymbol.name.equals(s.tsymbol.name);
}
@Override
public Boolean visit(BRecordType t, BType s) {
if (t == s) {
return true;
}
if (s.tag != TypeTags.RECORD || !hasSameReadonlyFlag(s, t)) {
return false;
}
BRecordType source = (BRecordType) s;
if (source.fields.size() != t.fields.size()) {
return false;
}
for (BField sourceField : source.fields.values()) {
if (t.fields.containsKey(sourceField.name.value)) {
BField targetField = t.fields.get(sourceField.name.value);
if (isSameType(sourceField.type, targetField.type, this.unresolvedTypes) &&
hasSameOptionalFlag(sourceField.symbol, targetField.symbol) &&
(!Symbols.isFlagOn(targetField.symbol.flags, Flags.READONLY) ||
Symbols.isFlagOn(sourceField.symbol.flags, Flags.READONLY))) {
continue;
}
}
return false;
}
return isSameType(source.restFieldType, t.restFieldType, this.unresolvedTypes);
}
private boolean hasSameOptionalFlag(BVarSymbol s, BVarSymbol t) {
return ((s.flags & Flags.OPTIONAL) ^ (t.flags & Flags.OPTIONAL)) != Flags.OPTIONAL;
}
private boolean hasSameReadonlyFlag(BType source, BType target) {
return Symbols.isFlagOn(target.flags, Flags.READONLY) == Symbols.isFlagOn(source.flags, Flags.READONLY);
}
public Boolean visit(BTupleType t, BType s) {
if (s.tag != TypeTags.TUPLE || !hasSameReadonlyFlag(s, t)) {
return false;
}
BTupleType source = (BTupleType) s;
if (source.tupleTypes.size() != t.tupleTypes.size()) {
return false;
}
for (int i = 0; i < source.tupleTypes.size(); i++) {
if (t.getTupleTypes().get(i) == symTable.noType) {
continue;
}
if (!isSameType(source.getTupleTypes().get(i), t.tupleTypes.get(i), this.unresolvedTypes)) {
return false;
}
}
return true;
}
@Override
public Boolean visit(BStreamType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BTableType t, BType s) {
return t == s;
}
@Override
public Boolean visit(BInvokableType t, BType s) {
return s.tag == TypeTags.INVOKABLE && isSameFunctionType((BInvokableType) s, t, this.unresolvedTypes);
}
@Override
public Boolean visit(BUnionType tUnionType, BType s) {
if (s.tag != TypeTags.UNION || !hasSameReadonlyFlag(s, tUnionType)) {
return false;
}
BUnionType sUnionType = (BUnionType) s;
if (sUnionType.getMemberTypes().size()
!= tUnionType.getMemberTypes().size()) {
return false;
}
Set<BType> sourceTypes = new LinkedHashSet<>(sUnionType.getMemberTypes());
Set<BType> targetTypes = new LinkedHashSet<>(tUnionType.getMemberTypes());
boolean notSameType = sourceTypes
.stream()
.map(sT -> targetTypes
.stream()
.anyMatch(it -> isSameType(it, sT, this.unresolvedTypes)))
.anyMatch(foundSameType -> !foundSameType);
return !notSameType;
}
@Override
public Boolean visit(BIntersectionType tIntersectionType, BType s) {
if (s.tag != TypeTags.INTERSECTION || !hasSameReadonlyFlag(s, tIntersectionType)) {
return false;
}
BIntersectionType sIntersectionType = (BIntersectionType) s;
if (sIntersectionType.getConstituentTypes().size() != tIntersectionType.getConstituentTypes().size()) {
return false;
}
Set<BType> sourceTypes = new LinkedHashSet<>(sIntersectionType.getConstituentTypes());
Set<BType> targetTypes = new LinkedHashSet<>(tIntersectionType.getConstituentTypes());
for (BType sourceType : sourceTypes) {
boolean foundSameType = false;
for (BType targetType : targetTypes) {
if (isSameType(sourceType, targetType, this.unresolvedTypes)) {
foundSameType = true;
break;
}
}
if (!foundSameType) {
return false;
}
}
return true;
}
@Override
public Boolean visit(BErrorType t, BType s) {
if (s.tag != TypeTags.ERROR) {
return false;
}
BErrorType source = (BErrorType) s;
if (!source.typeIdSet.equals(t.typeIdSet)) {
return false;
}
if (source.detailType == t.detailType) {
return true;
}
return isSameType(source.detailType, t.detailType, this.unresolvedTypes);
}
@Override
public Boolean visit(BTypedescType t, BType s) {
if (s.tag != TypeTags.TYPEDESC) {
return false;
}
BTypedescType sType = ((BTypedescType) s);
return isSameType(sType.constraint, t.constraint, this.unresolvedTypes);
}
@Override
public Boolean visit(BFiniteType t, BType s) {
return s == t;
}
@Override
public Boolean visit(BParameterizedType t, BType s) {
if (s.tag != TypeTags.PARAMETERIZED_TYPE) {
return false;
}
BParameterizedType sType = (BParameterizedType) s;
return isSameType(sType.paramValueType, t.paramValueType) && sType.paramSymbol.equals(t.paramSymbol);
}
}
|
This is a double lookup in the map. Consider replacing it with a single lookup.
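A minimal sketch of the single-lookup pattern, assuming localAdditions behaves like a Guava Multimap whose get() returns an empty collection for absent keys (the later isEmpty() check in this same method suggests it does):

// Instead of containsKey() followed by get(), which queries the map twice,
// fetch once and branch on the result:
Collection<V> local = localAdditions.get(structuralKey);
if (!local.isEmpty()) {
    return Iterables.unmodifiableIterable(local);
}
return Collections.emptyList();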
|
public ReadableState<Iterable<V>> get(K key) {
return new ReadableState<Iterable<V>>() {
final Object structuralKey = keyCoder.structuralValue(key);
@Override
public Iterable<V> read() {
KeyState keyState = keyStateMap.computeIfAbsent(structuralKey, k -> new KeyState(key));
if ((keyState.existence == KeyExistence.KNOWN_NONEXISTENT)
|| (allKeysKnown && keyState.existence == KeyExistence.UNKNOWN_EXISTENCE)) {
return Collections.emptyList();
}
if (localRemovals.contains(structuralKey)) {
if (localAdditions.containsKey(structuralKey)) {
return Iterables.unmodifiableIterable(localAdditions.get(structuralKey));
} else {
return Collections.emptyList();
}
}
if (keyState.valuesCached || complete) {
return Iterables.unmodifiableIterable(
Iterables.concat(keyState.values, localAdditions.get(structuralKey)));
}
Future<Iterable<V>> persistedData = getFutureForKey(key);
try (Closeable scope = scopedReadState()) {
final Iterable<V> persistedValues = persistedData.get();
if (Iterables.isEmpty(persistedValues)) {
Collection<V> local = localAdditions.get(structuralKey);
if (local.isEmpty()) {
keyState.existence = KeyExistence.KNOWN_NONEXISTENT;
return Collections.emptyList();
}
return Iterables.unmodifiableIterable(local);
}
if (persistedValues instanceof Weighted) {
keyState.existence = KeyExistence.KNOWN_EXIST;
keyState.valuesCached = true;
ConcatIterables<V> it = new ConcatIterables<>();
it.extendWith(persistedValues);
keyState.values = it;
}
return Iterables.unmodifiableIterable(
Iterables.concat(persistedValues, localAdditions.get(structuralKey)));
} catch (InterruptedException | ExecutionException | IOException e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
throw new RuntimeException("Unable to read Multimap state", e);
}
}
@Override
@SuppressWarnings("FutureReturnValueIgnored")
public ReadableState<Iterable<V>> readLater() {
WindmillMultimap.this.getFutureForKey(key);
return this;
}
};
}
|
if (localAdditions.containsKey(structuralKey)) {
|
@Override
public Iterable<V> read() {
KeyState keyState = null;
if (allKeysKnown) {
keyState = keyStateMap.get(structuralKey);
if (keyState == null || keyState.existence == KeyExistence.UNKNOWN_EXISTENCE) {
if (keyState != null) keyStateMap.remove(structuralKey);
return Collections.emptyList();
}
} else {
keyState = keyStateMap.computeIfAbsent(structuralKey, k -> new KeyState(key));
}
if (keyState.existence == KeyExistence.KNOWN_NONEXISTENT) {
return Collections.emptyList();
}
Iterable<V> localNewValues =
Iterables.limit(keyState.localAdditions, keyState.localAdditions.size());
if (keyState.removedLocally) {
return Iterables.unmodifiableIterable(localNewValues);
}
if (keyState.valuesCached || complete) {
return Iterables.unmodifiableIterable(
Iterables.concat(
Iterables.limit(keyState.values, keyState.valuesSize), localNewValues));
}
Future<Iterable<V>> persistedData = necessaryKeyEntriesFromStorageFuture(key);
try (Closeable scope = scopedReadState()) {
final Iterable<V> persistedValues = persistedData.get();
if (Iterables.isEmpty(persistedValues)) {
if (keyState.localAdditions.isEmpty()) {
keyState.existence = KeyExistence.KNOWN_NONEXISTENT;
return Collections.emptyList();
}
return Iterables.unmodifiableIterable(localNewValues);
}
keyState.existence = KeyExistence.KNOWN_EXIST;
if (persistedValues instanceof Weighted) {
keyState.valuesCached = true;
ConcatIterables<V> it = new ConcatIterables<>();
it.extendWith(persistedValues);
keyState.values = it;
keyState.valuesSize = Iterables.size(persistedValues);
}
return Iterables.unmodifiableIterable(
Iterables.concat(persistedValues, localNewValues));
} catch (InterruptedException | ExecutionException | IOException e) {
if (e instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
throw new RuntimeException("Unable to read Multimap state", e);
}
}
|
class KeyState {
final K originalKey;
KeyExistence existence;
boolean valuesCached;
ConcatIterables<V> values;
KeyState(K originalKey) {
this.originalKey = originalKey;
existence = KeyExistence.UNKNOWN_EXISTENCE;
valuesCached = false;
values = new ConcatIterables<>();
}
}
|
class KeyState {
final K originalKey;
KeyExistence existence;
boolean valuesCached;
ConcatIterables<V> values;
int valuesSize;
List<V> localAdditions;
boolean removedLocally;
KeyState(K originalKey) {
this.originalKey = originalKey;
existence = KeyExistence.UNKNOWN_EXISTENCE;
valuesCached = complete;
values = new ConcatIterables<>();
valuesSize = 0;
localAdditions = Lists.newArrayList();
removedLocally = false;
}
}
|
And we can call `groupInformationFromGraph.setGroupIds(...)` and `groupInformationFromGraph.setGroupNames(...)` here instead of setting them in each test.
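A hedged sketch of the consolidated setup the comment suggests; the setter names follow the comment and may differ from the actual GroupInformation API:

@BeforeEach
public void setup() {
    GroupInformation groupInformationFromGraph = new GroupInformation();
    groupInformationFromGraph.setGroupIds(new HashSet<>(Arrays.asList(GROUP_ID_1, GROUP_ID_2)));
    groupInformationFromGraph.setGroupNames(new HashSet<>(Arrays.asList("group1", "group2")));
    Mockito.lenient().when(graphClient.getGroupInformation(accessToken.getTokenValue()))
           .thenReturn(groupInformationFromGraph);
}

Each test then only configures the allowed groups it cares about.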
|
public void setup() {
groupInformationFromGraph = new GroupInformation();
allowedGroupIds = groupInformationFromGraph.getGroupsId();
allowedGroupNames = groupInformationFromGraph.getGroupsName();
this.autoCloseable = MockitoAnnotations.openMocks(this);
properties.setUserGroup(userGroup);
properties.setGraphMembershipUri("https:
Mockito.lenient().when(accessToken.getTokenValue()).thenReturn("fake-access-token");
userService = new AADOAuth2UserService(properties, graphClient);
}
|
allowedGroupIds = groupInformationFromGraph.getGroupsId();
|
public void setup() {
this.autoCloseable = MockitoAnnotations.openMocks(this);
GroupInformation groupInformationFromGraph = new GroupInformation();
Set<String> groupNamesFromGraph = new HashSet<>();
Set<String> groupIdsFromGraph = new HashSet<>();
groupNamesFromGraph.add("group1");
groupNamesFromGraph.add("group2");
groupIdsFromGraph.add(GROUP_ID_1);
groupIdsFromGraph.add(GROUP_ID_2);
groupInformationFromGraph.setGroupsIds(groupIdsFromGraph);
groupInformationFromGraph.setGroupsNames(groupNamesFromGraph);
properties.setUserGroup(userGroup);
properties.setGraphMembershipUri("https:
Mockito.lenient().when(accessToken.getTokenValue())
.thenReturn("fake-access-token");
Mockito.lenient().when(graphClient.getGroupInformation(accessToken.getTokenValue()))
.thenReturn(groupInformationFromGraph);
}
|
class AADAccessTokenGroupRolesExtractionTest {
@Mock
private OAuth2AccessToken accessToken;
@Mock
private GraphClient graphClient;
private AADAuthenticationProperties properties = new AADAuthenticationProperties();
private AADAuthenticationProperties.UserGroupProperties userGroup =
new AADAuthenticationProperties.UserGroupProperties();
private AADOAuth2UserService userService;
private AutoCloseable autoCloseable;
private GroupInformation groupInformationFromGraph;
private Set<String> allowedGroupNames;
private Set<String> allowedGroupIds;
@BeforeEach
@AfterEach
public void close() throws Exception {
this.autoCloseable.close();
}
@Test
public void testGroupsName() {
allowedGroupNames.add("group1");
allowedGroupNames.add("group2");
List<String> customizeGroupName = new ArrayList<>();
customizeGroupName.add("group1");
Mockito.lenient().when(graphClient.getGroupInformation(accessToken.getTokenValue()))
.thenReturn(groupInformationFromGraph);
userGroup.setAllowedGroupNames(customizeGroupName);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).doesNotContain("ROLE_group5");
assertThat(groupRoles).hasSize(1);
}
@Test
public void testGroupsId() {
allowedGroupIds.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
List<String> customizeGroupId = new ArrayList<>();
customizeGroupId.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
Mockito.lenient().when(graphClient.getGroupInformation(accessToken.getTokenValue()))
.thenReturn(groupInformationFromGraph);
userGroup.setAllowedGroupIds(allowedGroupIds);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).contains("ROLE_d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
assertThat(groupRoles).doesNotContain("ROLE_d07c0bd6-4aab-45ac-b87c-23e8d00194abaaa");
assertThat(groupRoles).hasSize(1);
}
@Test
public void testGroupsNameAndGroupsId() {
allowedGroupIds.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
allowedGroupNames.add("group1");
Set<String> customizeGroupIds = new HashSet<>();
customizeGroupIds.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
List<String> customizeGroupName = new ArrayList<>();
customizeGroupName.add("group1");
userGroup.setAllowedGroupIds(customizeGroupIds);
userGroup.setAllowedGroupNames(customizeGroupName);
Mockito.lenient().when(graphClient.getGroupInformation(accessToken.getTokenValue()))
.thenReturn(groupInformationFromGraph);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).doesNotContain("ROLE_group5");
assertThat(groupRoles).contains("ROLE_d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
assertThat(groupRoles).doesNotContain("ROLE_d07c0bd6-4aab-45ac-b87c-23e8d00194abaaa");
assertThat(groupRoles).hasSize(2);
}
@Test
public void testWithEnableFullList() {
allowedGroupIds.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
allowedGroupIds.add("6eddcc22-a24a-4459-b036-b9d9fc0f0bc7");
allowedGroupNames.add("group1");
Set<String> customizeGroupIds = new HashSet<>();
customizeGroupIds.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
List<String> customizeGroupName = new ArrayList<>();
customizeGroupName.add("group1");
userGroup.setAllowedGroupIds(customizeGroupIds);
userGroup.setAllowedGroupNames(customizeGroupName);
userGroup.setEnableFullList(true);
Mockito.lenient().when(graphClient.getGroupInformation(accessToken.getTokenValue()))
.thenReturn(groupInformationFromGraph);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).hasSize(3);
assertThat(groupRoles).contains("ROLE_group1");
}
@Test
public void testWithoutEnableFullList() {
allowedGroupIds.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
allowedGroupIds.add("6eddcc22-a24a-4459-b036-b9d9fc0f0bc7");
allowedGroupNames.add("group1");
allowedGroupNames.add("group2");
List<String> customizeGroupNames = new ArrayList<>();
Set<String> customizeGroupIds = new HashSet<>();
customizeGroupIds.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
customizeGroupNames.add("group1");
userGroup.setEnableFullList(false);
userGroup.setAllowedGroupIds(customizeGroupIds);
userGroup.setAllowedGroupNames(customizeGroupNames);
Mockito.lenient().when(graphClient.getGroupInformation(accessToken.getTokenValue()))
.thenReturn(groupInformationFromGraph);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).doesNotContain("ROLE_group5");
assertThat(groupRoles).contains("ROLE_d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
assertThat(groupRoles).doesNotContain("ROLE_d07c0bd6-4aab-45ac-b87c-23e8d00194abaaa");
assertThat(groupRoles).hasSize(2);
}
@Test
public void testAllGroupIds() {
allowedGroupIds.add("d07c0bd6-4aab-45ac-b87c-23e8d00194ab");
allowedGroupIds.add("6eddcc22-a24a-4459-b036-b9d9fc0f0bc7");
allowedGroupNames.add("group1");
allowedGroupNames.add("group2");
Set<String> customizeGroupIds = new HashSet<>();
customizeGroupIds.add("all");
List<String> customizeGroupName = new ArrayList<>();
customizeGroupName.add("group1");
userGroup.setAllowedGroupIds(customizeGroupIds);
userGroup.setAllowedGroupNames(customizeGroupName);
userGroup.setEnableFullList(true);
Mockito.lenient().when(graphClient.getGroupInformation(accessToken.getTokenValue()))
.thenReturn(groupInformationFromGraph);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).hasSize(3);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).doesNotContain("ROLE_group2");
}
}
|
class AADAccessTokenGroupRolesExtractionTest {
private static final String GROUP_ID_1 = "d07c0bd6-4aab-45ac-b87c-23e8d00194ab";
private static final String GROUP_ID_2 = "6eddcc22-a24a-4459-b036-b9d9fc0f0bc7";
private final AADAuthenticationProperties properties = new AADAuthenticationProperties();
private final AADAuthenticationProperties.UserGroupProperties userGroup =
new AADAuthenticationProperties.UserGroupProperties();
private AutoCloseable autoCloseable;
@Mock
private OAuth2AccessToken accessToken;
@Mock
private GraphClient graphClient;
@BeforeAll
@AfterEach
public void reset() {
userGroup.setAllowedGroupNames(Collections.emptyList());
userGroup.setAllowedGroupIds(Collections.emptySet());
userGroup.setEnableFullList(false);
}
@AfterAll
public void close() throws Exception {
this.autoCloseable.close();
}
@Test
public void testAllowedGroupsNames() {
List<String> allowedGroupNames = new ArrayList<>();
allowedGroupNames.add("group1");
userGroup.setAllowedGroupNames(allowedGroupNames);
AADOAuth2UserService userService = new AADOAuth2UserService(properties, graphClient);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).hasSize(1);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).doesNotContain("ROLE_group2");
}
@Test
public void testAllowedGroupsIds() {
Set<String> allowedGroupIds = new HashSet<>();
allowedGroupIds.add(GROUP_ID_1);
userGroup.setAllowedGroupIds(allowedGroupIds);
AADOAuth2UserService userService = new AADOAuth2UserService(properties, graphClient);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).hasSize(1);
assertThat(groupRoles).contains("ROLE_" + GROUP_ID_1);
assertThat(groupRoles).doesNotContain("ROLE_" + GROUP_ID_2);
}
@Test
public void testAllowedGroupsNamesAndAllowedGroupsIds() {
Set<String> allowedGroupIds = new HashSet<>();
allowedGroupIds.add(GROUP_ID_1);
List<String> allowedGroupNames = new ArrayList<>();
allowedGroupNames.add("group1");
userGroup.setAllowedGroupIds(allowedGroupIds);
userGroup.setAllowedGroupNames(allowedGroupNames);
AADOAuth2UserService userService = new AADOAuth2UserService(properties, graphClient);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).hasSize(2);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).doesNotContain("ROLE_group2");
assertThat(groupRoles).contains("ROLE_" + GROUP_ID_1);
assertThat(groupRoles).doesNotContain("ROLE_" + GROUP_ID_2);
}
@Test
public void testWithEnableFullList() {
Set<String> allowedGroupIds = new HashSet<>();
allowedGroupIds.add(GROUP_ID_1);
List<String> allowedGroupNames = new ArrayList<>();
allowedGroupNames.add("group1");
userGroup.setAllowedGroupIds(allowedGroupIds);
userGroup.setAllowedGroupNames(allowedGroupNames);
userGroup.setEnableFullList(true);
AADOAuth2UserService userService = new AADOAuth2UserService(properties, graphClient);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).hasSize(3);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).contains("ROLE_" + GROUP_ID_1);
assertThat(groupRoles).contains("ROLE_" + GROUP_ID_2);
}
@Test
public void testWithoutEnableFullList() {
List<String> allowedGroupNames = new ArrayList<>();
Set<String> allowedGroupIds = new HashSet<>();
allowedGroupIds.add(GROUP_ID_1);
allowedGroupNames.add("group1");
userGroup.setEnableFullList(false);
userGroup.setAllowedGroupIds(allowedGroupIds);
userGroup.setAllowedGroupNames(allowedGroupNames);
AADOAuth2UserService userService = new AADOAuth2UserService(properties, graphClient);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).hasSize(2);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).doesNotContain("ROLE_group2");
assertThat(groupRoles).contains("ROLE_" + GROUP_ID_1);
assertThat(groupRoles).doesNotContain("ROLE_" + GROUP_ID_2);
}
@Test
public void testAllowedGroupIdsAllWithoutEnableFullList() {
Set<String> allowedGroupIds = new HashSet<>();
allowedGroupIds.add("all");
List<String> allowedGroupNames = new ArrayList<>();
allowedGroupNames.add("group1");
userGroup.setAllowedGroupIds(allowedGroupIds);
userGroup.setAllowedGroupNames(allowedGroupNames);
userGroup.setEnableFullList(false);
AADOAuth2UserService userService = new AADOAuth2UserService(properties, graphClient);
Set<String> groupRoles = userService.extractGroupRolesFromAccessToken(accessToken);
assertThat(groupRoles).hasSize(3);
assertThat(groupRoles).contains("ROLE_group1");
assertThat(groupRoles).doesNotContain("ROLE_group2");
assertThat(groupRoles).contains("ROLE_" + GROUP_ID_1);
assertThat(groupRoles).contains("ROLE_" + GROUP_ID_2);
}
@Test
public void testIllegalGroupIdParam() {
WebApplicationContextRunnerUtils
.getContextRunnerWithRequiredProperties()
.withPropertyValues(
"azure.activedirectory.user-group.allowed-group-ids = all," + GROUP_ID_1
)
.run(context ->
assertThrows(IllegalStateException.class, () -> context.getBean(AADAuthenticationProperties.class)));
}
}
|
Good point. We probably have a few places where this can be applied. I'll have a look over the next few days.
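Judging from the copy loop in the snippet below, the simplification presumably under discussion is the Java 9+ `InputStream.transferTo(OutputStream)` method. A minimal sketch, assuming that reading is correct:

protected void writeTo(InputStream inputStream, OutputStream entityStream) throws IOException {
    // transferTo (Java 9+) performs the same buffered copy as the manual
    // while-loop, returning the number of bytes transferred.
    // Note: unlike the original, close failures are not swallowed here.
    try (InputStream in = inputStream) {
        in.transferTo(entityStream);
    } finally {
        entityStream.close();
    }
}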
|
protected void writeTo(InputStream inputStream, OutputStream entityStream) throws IOException {
try {
byte[] buffer = new byte[8192];
int c;
while ((c = inputStream.read(buffer)) != -1) {
entityStream.write(buffer, 0, c);
}
} finally {
try {
inputStream.close();
} catch (IOException e) {
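                // best-effort close: a failure here is deliberately ignored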
}
try {
entityStream.close();
} catch (IOException e) {
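                // best-effort close: a failure here is deliberately ignored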
}
}
}
|
int c;
|
protected void writeTo(InputStream inputStream, OutputStream entityStream) throws IOException {
try {
byte[] buffer = new byte[8192];
int c;
while ((c = inputStream.read(buffer)) != -1) {
entityStream.write(buffer, 0, c);
}
} finally {
try {
inputStream.close();
} catch (IOException e) {
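                // best-effort close: a failure here is deliberately ignored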
}
try {
entityStream.close();
} catch (IOException e) {
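                // best-effort close: a failure here is deliberately ignored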
}
}
}
|
class InputStreamMessageBodyHandler implements MessageBodyWriter<InputStream>, MessageBodyReader<InputStream> {
public boolean isReadable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
return true;
}
public InputStream readFrom(Class<InputStream> type, Type genericType, Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, String> httpHeaders, InputStream entityStream) throws IOException, WebApplicationException {
return entityStream;
}
@Override
public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
return InputStream.class.isAssignableFrom(type);
}
@Override
public void writeTo(InputStream inputStream, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream) throws IOException, WebApplicationException {
writeTo(inputStream, entityStream);
}
}
|
class InputStreamMessageBodyHandler implements MessageBodyWriter<InputStream>, MessageBodyReader<InputStream> {
public boolean isReadable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
return true;
}
public InputStream readFrom(Class<InputStream> type, Type genericType, Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, String> httpHeaders, InputStream entityStream) throws IOException, WebApplicationException {
return entityStream;
}
@Override
public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
return InputStream.class.isAssignableFrom(type);
}
@Override
public void writeTo(InputStream inputStream, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream) throws IOException, WebApplicationException {
writeTo(inputStream, entityStream);
}
}
|
Let's use `System.nanoTime` to avoid clock resets.
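For reference, a minimal self-contained sketch of the monotonic-clock measurement pattern (the workload is hypothetical; `DELAY_MILLIS` mirrors the test constant):

import java.time.Duration;

class MonotonicTimingSketch {
    static final int DELAY_MILLIS = 20; // hypothetical delay, mirroring the test constant

    static void assertRanAfterDelay(Runnable work) {
        // System.nanoTime() is monotonic within a single JVM, so it is immune to
        // wall-clock resets (NTP corrections, manual changes) that can make
        // System.currentTimeMillis() jump backwards.
        final long startNanos = System.nanoTime();
        work.run();
        final long elapsedNanos = System.nanoTime() - startNanos;
        // Convert the millisecond delay to nanoseconds before comparing.
        if (elapsedNanos < Duration.ofMillis(DELAY_MILLIS).toNanos()) {
            throw new AssertionError("work completed before the scheduled delay elapsed");
        }
    }
}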
|
public void testScheduling() throws ExecutionException, InterruptedException {
final SchedulingRpcEndpoint endpoint = new SchedulingRpcEndpoint(akkaRpcService);
endpoint.start();
final SchedulingRpcEndpointGateway gateway =
endpoint.getSelfGateway(SchedulingRpcEndpointGateway.class);
final CompletableFuture<Void> scheduleRunnableFuture = new CompletableFuture<>();
final CompletableFuture<Void> scheduleCallableFuture = new CompletableFuture<>();
final CompletableFuture<Void> executeFuture = new CompletableFuture<>();
final long scheduleTime = System.currentTimeMillis();
gateway.schedule(scheduleRunnableFuture, scheduleCallableFuture, executeFuture);
assertThat(
scheduleRunnableFuture.thenApply(ignored -> System.currentTimeMillis()).get(),
greaterThanOrEqualTo(scheduleTime + SchedulingRpcEndpoint.DELAY_MILLIS));
assertThat(
scheduleCallableFuture.thenApply(ignored -> System.currentTimeMillis()).get(),
greaterThanOrEqualTo(scheduleTime + SchedulingRpcEndpoint.DELAY_MILLIS));
executeFuture.get();
}
|
final long scheduleTime = System.currentTimeMillis();
|
public void testScheduling() throws ExecutionException, InterruptedException {
final SchedulingRpcEndpoint endpoint = new SchedulingRpcEndpoint(akkaRpcService);
endpoint.start();
final SchedulingRpcEndpointGateway gateway =
endpoint.getSelfGateway(SchedulingRpcEndpointGateway.class);
final CompletableFuture<Void> scheduleRunnableFuture = new CompletableFuture<>();
final CompletableFuture<Void> scheduleCallableFuture = new CompletableFuture<>();
final CompletableFuture<Void> executeFuture = new CompletableFuture<>();
final long scheduleTime = System.nanoTime();
gateway.schedule(scheduleRunnableFuture, scheduleCallableFuture, executeFuture);
assertThat(
scheduleRunnableFuture.thenApply(ignored -> System.nanoTime()).get(),
greaterThanOrEqualTo(
scheduleTime
+ Duration.ofMillis(SchedulingRpcEndpoint.DELAY_MILLIS).toNanos()));
assertThat(
scheduleCallableFuture.thenApply(ignored -> System.nanoTime()).get(),
greaterThanOrEqualTo(
scheduleTime
+ Duration.ofMillis(SchedulingRpcEndpoint.DELAY_MILLIS).toNanos()));
executeFuture.get();
}
|
class AkkaRpcActorTest extends TestLogger {
private static final Logger LOG = LoggerFactory.getLogger(AkkaRpcActorTest.class);
private static Time timeout = Time.milliseconds(10000L);
private static AkkaRpcService akkaRpcService;
@BeforeClass
public static void setup() {
akkaRpcService = new TestingRpcService();
}
@AfterClass
public static void shutdown()
throws InterruptedException, ExecutionException, TimeoutException {
RpcUtils.terminateRpcService(akkaRpcService, timeout);
}
/**
* Tests that the rpc endpoint and the associated rpc gateway have the same addresses.
*
* @throws Exception
*/
@Test
public void testAddressResolution() throws Exception {
DummyRpcEndpoint rpcEndpoint = new DummyRpcEndpoint(akkaRpcService);
CompletableFuture<DummyRpcGateway> futureRpcGateway =
akkaRpcService.connect(rpcEndpoint.getAddress(), DummyRpcGateway.class);
DummyRpcGateway rpcGateway = futureRpcGateway.get(timeout.getSize(), timeout.getUnit());
assertEquals(rpcEndpoint.getAddress(), rpcGateway.getAddress());
}
/**
* Tests that a {@link RpcConnectionException} is thrown if the rpc endpoint cannot be connected
* to.
*/
@Test
public void testFailingAddressResolution() throws Exception {
CompletableFuture<DummyRpcGateway> futureRpcGateway =
akkaRpcService.connect("foobar", DummyRpcGateway.class);
try {
futureRpcGateway.get(timeout.getSize(), timeout.getUnit());
fail("The rpc connection resolution should have failed.");
} catch (ExecutionException exception) {
assertTrue(exception.getCause() instanceof RpcConnectionException);
}
}
/**
* Tests that the {@link AkkaRpcActor} discards messages until the corresponding {@link
* RpcEndpoint} has been started.
*/
@Test
public void testMessageDiscarding() throws Exception {
int expectedValue = 1337;
DummyRpcEndpoint rpcEndpoint = new DummyRpcEndpoint(akkaRpcService);
DummyRpcGateway rpcGateway = rpcEndpoint.getSelfGateway(DummyRpcGateway.class);
CompletableFuture<Integer> result = rpcGateway.foobar();
try {
result.get(timeout.getSize(), timeout.getUnit());
fail("Expected an AkkaRpcException.");
} catch (ExecutionException ee) {
assertTrue(ee.getCause() instanceof AkkaRpcException);
}
rpcEndpoint.setFoobar(expectedValue);
rpcEndpoint.start();
try {
result = rpcGateway.foobar();
Integer actualValue = result.get(timeout.getSize(), timeout.getUnit());
assertThat(
"The new foobar value should have been returned.",
actualValue,
Is.is(expectedValue));
} finally {
RpcUtils.terminateRpcEndpoint(rpcEndpoint, timeout);
}
}
/**
* Tests that we can wait for a RpcEndpoint to terminate.
*
* @throws ExecutionException
* @throws InterruptedException
*/
@Test(timeout = 5000)
public void testRpcEndpointTerminationFuture() throws Exception {
final DummyRpcEndpoint rpcEndpoint = new DummyRpcEndpoint(akkaRpcService);
rpcEndpoint.start();
CompletableFuture<Void> terminationFuture = rpcEndpoint.getTerminationFuture();
assertFalse(terminationFuture.isDone());
CompletableFuture.runAsync(rpcEndpoint::closeAsync, akkaRpcService.getExecutor());
terminationFuture.get();
}
@Test
public void testExceptionPropagation() throws Exception {
ExceptionalEndpoint rpcEndpoint = new ExceptionalEndpoint(akkaRpcService);
rpcEndpoint.start();
ExceptionalGateway rpcGateway = rpcEndpoint.getSelfGateway(ExceptionalGateway.class);
CompletableFuture<Integer> result = rpcGateway.doStuff();
try {
result.get(timeout.getSize(), timeout.getUnit());
fail("this should fail with an exception");
} catch (ExecutionException e) {
Throwable cause = e.getCause();
assertEquals(RuntimeException.class, cause.getClass());
assertEquals("my super specific test exception", cause.getMessage());
}
}
@Test
public void testExceptionPropagationFuturePiping() throws Exception {
ExceptionalFutureEndpoint rpcEndpoint = new ExceptionalFutureEndpoint(akkaRpcService);
rpcEndpoint.start();
ExceptionalGateway rpcGateway = rpcEndpoint.getSelfGateway(ExceptionalGateway.class);
CompletableFuture<Integer> result = rpcGateway.doStuff();
try {
result.get(timeout.getSize(), timeout.getUnit());
fail("this should fail with an exception");
} catch (ExecutionException e) {
Throwable cause = e.getCause();
assertEquals(Exception.class, cause.getClass());
assertEquals("some test", cause.getMessage());
}
}
/** Tests that exception thrown in the onStop method are returned by the termination future. */
@Test
public void testOnStopExceptionPropagation() throws Exception {
FailingOnStopEndpoint rpcEndpoint =
new FailingOnStopEndpoint(akkaRpcService, "FailingOnStopEndpoint");
rpcEndpoint.start();
CompletableFuture<Void> terminationFuture = rpcEndpoint.closeAsync();
try {
terminationFuture.get();
} catch (ExecutionException e) {
assertTrue(e.getCause() instanceof FailingOnStopEndpoint.OnStopException);
}
}
/** Checks that the onStop callback is executed within the main thread. */
@Test
public void testOnStopExecutedByMainThread() throws Exception {
SimpleRpcEndpoint simpleRpcEndpoint =
new SimpleRpcEndpoint(akkaRpcService, "SimpleRpcEndpoint");
simpleRpcEndpoint.start();
CompletableFuture<Void> terminationFuture = simpleRpcEndpoint.closeAsync();
terminationFuture.get();
}
/** Tests that actors are properly terminated when the AkkaRpcService is shut down. */
@Test
public void testActorTerminationWhenServiceShutdown() throws Exception {
final ActorSystem rpcActorSystem = AkkaUtils.createDefaultActorSystem();
final RpcService rpcService =
new AkkaRpcService(
rpcActorSystem, AkkaRpcServiceConfiguration.defaultConfiguration());
try {
SimpleRpcEndpoint rpcEndpoint =
new SimpleRpcEndpoint(rpcService, SimpleRpcEndpoint.class.getSimpleName());
rpcEndpoint.start();
CompletableFuture<Void> terminationFuture = rpcEndpoint.getTerminationFuture();
rpcService.stopService();
terminationFuture.get(timeout.toMilliseconds(), TimeUnit.MILLISECONDS);
} finally {
rpcActorSystem.terminate();
FutureUtils.toJava(rpcActorSystem.whenTerminated())
.get(timeout.getSize(), timeout.getUnit());
}
}
/**
* Tests that the {@link AkkaRpcActor} only completes after the asynchronous post stop action
* has completed.
*/
@Test
public void testActorTerminationWithAsynchronousOnStopAction() throws Exception {
final CompletableFuture<Void> onStopFuture = new CompletableFuture<>();
final AsynchronousOnStopEndpoint endpoint =
new AsynchronousOnStopEndpoint(akkaRpcService, onStopFuture);
try {
endpoint.start();
final CompletableFuture<Void> terminationFuture = endpoint.closeAsync();
assertFalse(terminationFuture.isDone());
onStopFuture.complete(null);
terminationFuture.get();
} finally {
RpcUtils.terminateRpcEndpoint(endpoint, timeout);
}
}
/**
* Tests that we can still run commands via the main thread executor when the onStop method is
* called.
*/
@Test
public void testMainThreadExecutionOnStop() throws Exception {
final MainThreadExecutorOnStopEndpoint endpoint =
new MainThreadExecutorOnStopEndpoint(akkaRpcService);
try {
endpoint.start();
CompletableFuture<Void> terminationFuture = endpoint.closeAsync();
terminationFuture.get();
} finally {
RpcUtils.terminateRpcEndpoint(endpoint, timeout);
}
}
/** Tests that when the onStop future completes that no other messages will be processed. */
@Test
public void testOnStopFutureCompletionDirectlyTerminatesAkkaRpcActor() throws Exception {
final CompletableFuture<Void> onStopFuture = new CompletableFuture<>();
final TerminatingAfterOnStopFutureCompletionEndpoint endpoint =
new TerminatingAfterOnStopFutureCompletionEndpoint(akkaRpcService, onStopFuture);
try {
endpoint.start();
final AsyncOperationGateway asyncOperationGateway =
endpoint.getSelfGateway(AsyncOperationGateway.class);
final CompletableFuture<Void> terminationFuture = endpoint.closeAsync();
assertThat(terminationFuture.isDone(), is(false));
final CompletableFuture<Integer> firstAsyncOperationFuture =
asyncOperationGateway.asyncOperation(timeout);
final CompletableFuture<Integer> secondAsyncOperationFuture =
asyncOperationGateway.asyncOperation(timeout);
endpoint.awaitEnterAsyncOperation();
onStopFuture.complete(null);
assertThat(terminationFuture.isDone(), is(false));
endpoint.triggerUnblockAsyncOperation();
assertThat(firstAsyncOperationFuture.get(), is(42));
terminationFuture.get();
assertThat(endpoint.getNumberAsyncOperationCalls(), is(1));
assertThat(secondAsyncOperationFuture.isDone(), is(false));
} finally {
RpcUtils.terminateRpcEndpoint(endpoint, timeout);
}
}
/**
     * Tests that the {@link RpcEndpoint#onStart()} method is called when the rpc endpoint is
     * started.
*/
@Test
public void testOnStartIsCalledWhenRpcEndpointStarts() throws Exception {
final OnStartEndpoint onStartEndpoint = new OnStartEndpoint(akkaRpcService, null);
try {
onStartEndpoint.start();
onStartEndpoint.awaitUntilOnStartCalled();
} finally {
RpcUtils.terminateRpcEndpoint(onStartEndpoint, timeout);
}
}
/** Tests that if onStart fails, then the endpoint terminates. */
@Test
public void testOnStartFails() throws Exception {
final FlinkException testException = new FlinkException("Test exception");
final OnStartEndpoint onStartEndpoint = new OnStartEndpoint(akkaRpcService, testException);
onStartEndpoint.start();
onStartEndpoint.awaitUntilOnStartCalled();
try {
onStartEndpoint.getTerminationFuture().get();
fail("Expected that the rpc endpoint failed onStart and thus has terminated.");
} catch (ExecutionException ee) {
assertThat(
ExceptionUtils.findThrowable(ee, exception -> exception.equals(testException))
.isPresent(),
is(true));
}
}
/**
* Tests that multiple termination calls won't trigger the onStop action multiple times. Note
* that this test is a probabilistic test which only fails sometimes without the fix. See
* FLINK-16703.
*/
@Test
public void callsOnStopOnlyOnce() throws Exception {
final CompletableFuture<Void> onStopFuture = new CompletableFuture<>();
final OnStopCountingRpcEndpoint endpoint =
new OnStopCountingRpcEndpoint(akkaRpcService, onStopFuture);
try {
endpoint.start();
final AkkaBasedEndpoint selfGateway = endpoint.getSelfGateway(AkkaBasedEndpoint.class);
selfGateway.getActorRef().tell(ControlMessages.TERMINATE, ActorRef.noSender());
selfGateway.getActorRef().tell(ControlMessages.TERMINATE, ActorRef.noSender());
endpoint.waitUntilOnStopHasBeenCalled();
onStopFuture.complete(null);
endpoint.getTerminationFuture().get();
assertThat(endpoint.getNumOnStopCalls(), is(1));
} finally {
onStopFuture.complete(null);
RpcUtils.terminateRpcEndpoint(endpoint, timeout);
}
}
@Test
public void canReuseEndpointNameAfterTermination() throws Exception {
final String endpointName = "not_unique";
try (SimpleRpcEndpoint simpleRpcEndpoint1 =
new SimpleRpcEndpoint(akkaRpcService, endpointName)) {
simpleRpcEndpoint1.start();
simpleRpcEndpoint1.closeAsync().join();
try (SimpleRpcEndpoint simpleRpcEndpoint2 =
new SimpleRpcEndpoint(akkaRpcService, endpointName)) {
simpleRpcEndpoint2.start();
assertThat(
simpleRpcEndpoint2.getAddress(),
is(equalTo(simpleRpcEndpoint1.getAddress())));
}
}
}
@Test
public void terminationFutureDoesNotBlockRpcEndpointCreation() throws Exception {
try (final SimpleRpcEndpoint simpleRpcEndpoint =
new SimpleRpcEndpoint(akkaRpcService, "foobar")) {
final CompletableFuture<Void> terminationFuture =
simpleRpcEndpoint.getTerminationFuture();
final CompletableFuture<SimpleRpcEndpoint> foobar2 =
terminationFuture.thenApply(
ignored -> new SimpleRpcEndpoint(akkaRpcService, "foobar2"));
simpleRpcEndpoint.closeAsync();
final SimpleRpcEndpoint simpleRpcEndpoint2 = foobar2.join();
simpleRpcEndpoint2.close();
}
}
@Test
public void resolvesRunningAkkaRpcActor() throws Exception {
final String endpointName = "foobar";
try (RpcEndpoint simpleRpcEndpoint1 = createRpcEndpointWithRandomNameSuffix(endpointName);
RpcEndpoint simpleRpcEndpoint2 =
createRpcEndpointWithRandomNameSuffix(endpointName)) {
simpleRpcEndpoint1.closeAsync().join();
final String wildcardName = AkkaRpcServiceUtils.createWildcardName(endpointName);
final String wildcardAddress = AkkaRpcServiceUtils.getLocalRpcUrl(wildcardName);
final RpcGateway rpcGateway =
akkaRpcService.connect(wildcardAddress, RpcGateway.class).join();
assertThat(rpcGateway.getAddress(), is(equalTo(simpleRpcEndpoint2.getAddress())));
}
}
private RpcEndpoint createRpcEndpointWithRandomNameSuffix(String prefix) {
return new SimpleRpcEndpoint(akkaRpcService, AkkaRpcServiceUtils.createRandomName(prefix));
}
@Test
public void canRespondWithNullValueLocally() throws Exception {
try (final NullRespondingEndpoint nullRespondingEndpoint =
new NullRespondingEndpoint(akkaRpcService)) {
nullRespondingEndpoint.start();
final NullRespondingGateway selfGateway =
nullRespondingEndpoint.getSelfGateway(NullRespondingGateway.class);
final CompletableFuture<Integer> nullValuedResponseFuture = selfGateway.foobar();
assertThat(nullValuedResponseFuture.join(), is(nullValue()));
}
}
@Test
public void canRespondWithSynchronousNullValueLocally() throws Exception {
try (final NullRespondingEndpoint nullRespondingEndpoint =
new NullRespondingEndpoint(akkaRpcService)) {
nullRespondingEndpoint.start();
final NullRespondingGateway selfGateway =
nullRespondingEndpoint.getSelfGateway(NullRespondingGateway.class);
final Integer value = selfGateway.synchronousFoobar();
assertThat(value, is(nullValue()));
}
}
@Test
public void canRespondWithSerializedValueLocally() throws Exception {
try (final SerializedValueRespondingEndpoint endpoint =
new SerializedValueRespondingEndpoint(akkaRpcService)) {
endpoint.start();
final SerializedValueRespondingGateway selfGateway =
endpoint.getSelfGateway(SerializedValueRespondingGateway.class);
assertThat(
selfGateway.getSerializedValueSynchronously(),
equalTo(SerializedValueRespondingEndpoint.SERIALIZED_VALUE));
final CompletableFuture<SerializedValue<String>> responseFuture =
selfGateway.getSerializedValue();
assertThat(
responseFuture.get(),
equalTo(SerializedValueRespondingEndpoint.SERIALIZED_VALUE));
}
}
/**
* Verifies that actions scheduled via the main thread executor are eventually run while
* adhering to the provided delays.
*
* <p>This test does not assert any upper bounds for how late something is run, because that
* would make the test unstable in some environments, and there is no guarantee that such an
* upper bound exists in the first place.
*
* <p>There are various failure points for this test, including the scheduling from the {@link
* RpcEndpoint} to the {@link AkkaInvocationHandler}, the conversion of these calls by the
* handler into Call-/RunAsync messages, the handling of said messages by the {@link
* AkkaRpcActor} and in the case of RunAsync the actual scheduling by the underlying actor
* system. This isn't an ideal test setup, but these components are difficult to test in
* isolation.
*/
@Test
interface DummyRpcGateway extends RpcGateway {
CompletableFuture<Integer> foobar();
}
static class DummyRpcEndpoint extends RpcEndpoint implements DummyRpcGateway {
private volatile int foobar = 42;
protected DummyRpcEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Integer> foobar() {
return CompletableFuture.completedFuture(foobar);
}
public void setFoobar(int value) {
foobar = value;
}
}
interface NullRespondingGateway extends DummyRpcGateway {
Integer synchronousFoobar();
}
static class NullRespondingEndpoint extends RpcEndpoint implements NullRespondingGateway {
protected NullRespondingEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Integer> foobar() {
return CompletableFuture.completedFuture(null);
}
@Override
public Integer synchronousFoobar() {
return null;
}
}
interface SerializedValueRespondingGateway extends RpcGateway {
CompletableFuture<SerializedValue<String>> getSerializedValue();
SerializedValue<String> getSerializedValueSynchronously();
}
static class SerializedValueRespondingEndpoint extends RpcEndpoint
implements SerializedValueRespondingGateway {
static final SerializedValue<String> SERIALIZED_VALUE;
static {
try {
SERIALIZED_VALUE = new SerializedValue<>("string-value");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public SerializedValueRespondingEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<SerializedValue<String>> getSerializedValue() {
return CompletableFuture.completedFuture(SERIALIZED_VALUE);
}
@Override
public SerializedValue<String> getSerializedValueSynchronously() {
return SERIALIZED_VALUE;
}
}
private interface ExceptionalGateway extends RpcGateway {
CompletableFuture<Integer> doStuff();
}
private static class ExceptionalEndpoint extends RpcEndpoint implements ExceptionalGateway {
protected ExceptionalEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Integer> doStuff() {
throw new RuntimeException("my super specific test exception");
}
}
private static class ExceptionalFutureEndpoint extends RpcEndpoint
implements ExceptionalGateway {
protected ExceptionalFutureEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Integer> doStuff() {
final CompletableFuture<Integer> future = new CompletableFuture<>();
new Thread() {
@Override
public void run() {
try {
Thread.sleep(10);
} catch (InterruptedException ignored) {
}
future.completeExceptionally(new Exception("some test"));
}
}.start();
return future;
}
}
private static class SimpleRpcEndpoint extends RpcEndpoint implements RpcGateway {
protected SimpleRpcEndpoint(RpcService rpcService, String endpointId) {
super(rpcService, endpointId);
}
}
private static class FailingOnStopEndpoint extends RpcEndpoint implements RpcGateway {
protected FailingOnStopEndpoint(RpcService rpcService, String endpointId) {
super(rpcService, endpointId);
}
@Override
public CompletableFuture<Void> onStop() {
return FutureUtils.completedExceptionally(new OnStopException("Test exception."));
}
private static class OnStopException extends FlinkException {
private static final long serialVersionUID = 6701096588415871592L;
public OnStopException(String message) {
super(message);
}
}
}
static class AsynchronousOnStopEndpoint extends RpcEndpoint {
private final CompletableFuture<Void> onStopFuture;
protected AsynchronousOnStopEndpoint(
RpcService rpcService, CompletableFuture<Void> onStopFuture) {
super(rpcService);
this.onStopFuture = Preconditions.checkNotNull(onStopFuture);
}
@Override
public CompletableFuture<Void> onStop() {
return onStopFuture;
}
}
private static class MainThreadExecutorOnStopEndpoint extends RpcEndpoint {
protected MainThreadExecutorOnStopEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Void> onStop() {
return CompletableFuture.runAsync(() -> {}, getMainThreadExecutor());
}
}
interface AsyncOperationGateway extends RpcGateway {
CompletableFuture<Integer> asyncOperation(@RpcTimeout Time timeout);
}
private static class TerminatingAfterOnStopFutureCompletionEndpoint extends RpcEndpoint
implements AsyncOperationGateway {
private final CompletableFuture<Void> onStopFuture;
private final OneShotLatch blockAsyncOperation = new OneShotLatch();
private final OneShotLatch enterAsyncOperation = new OneShotLatch();
private final AtomicInteger asyncOperationCounter = new AtomicInteger(0);
protected TerminatingAfterOnStopFutureCompletionEndpoint(
RpcService rpcService, CompletableFuture<Void> onStopFuture) {
super(rpcService);
this.onStopFuture = onStopFuture;
}
@Override
public CompletableFuture<Integer> asyncOperation(Time timeout) {
asyncOperationCounter.incrementAndGet();
enterAsyncOperation.trigger();
try {
blockAsyncOperation.await();
} catch (InterruptedException e) {
throw new FlinkRuntimeException(e);
}
return CompletableFuture.completedFuture(42);
}
@Override
public CompletableFuture<Void> onStop() {
return onStopFuture;
}
void awaitEnterAsyncOperation() throws InterruptedException {
enterAsyncOperation.await();
}
void triggerUnblockAsyncOperation() {
blockAsyncOperation.trigger();
}
int getNumberAsyncOperationCalls() {
return asyncOperationCounter.get();
}
}
private static final class OnStartEndpoint extends RpcEndpoint {
private final CountDownLatch countDownLatch;
@Nullable private final Exception exception;
OnStartEndpoint(RpcService rpcService, @Nullable Exception exception) {
super(rpcService);
this.countDownLatch = new CountDownLatch(1);
this.exception = exception;
getTerminationFuture().whenComplete((aVoid, throwable) -> closeAsync());
}
@Override
public void onStart() throws Exception {
countDownLatch.countDown();
ExceptionUtils.tryRethrowException(exception);
}
public void awaitUntilOnStartCalled() throws InterruptedException {
countDownLatch.await();
}
}
private static final class OnStopCountingRpcEndpoint extends RpcEndpoint {
private final AtomicInteger numOnStopCalls = new AtomicInteger(0);
private final OneShotLatch onStopHasBeenCalled = new OneShotLatch();
private final CompletableFuture<Void> onStopFuture;
private OnStopCountingRpcEndpoint(
RpcService rpcService, CompletableFuture<Void> onStopFuture) {
super(rpcService);
this.onStopFuture = onStopFuture;
}
@Override
protected CompletableFuture<Void> onStop() {
onStopHasBeenCalled.trigger();
numOnStopCalls.incrementAndGet();
return onStopFuture;
}
private int getNumOnStopCalls() {
return numOnStopCalls.get();
}
private void waitUntilOnStopHasBeenCalled() throws InterruptedException {
onStopHasBeenCalled.await();
}
}
interface SchedulingRpcEndpointGateway extends RpcGateway {
void schedule(
final CompletableFuture<Void> scheduleRunnableFuture,
final CompletableFuture<Void> scheduleCallableFuture,
final CompletableFuture<Void> executeFuture);
}
private static final class SchedulingRpcEndpoint extends RpcEndpoint
implements SchedulingRpcEndpointGateway {
static final int DELAY_MILLIS = 20;
public SchedulingRpcEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public void schedule(
final CompletableFuture<Void> scheduleRunnableFuture,
final CompletableFuture<Void> scheduleCallableFuture,
final CompletableFuture<Void> executeFuture) {
getMainThreadExecutor()
.schedule(
() -> scheduleRunnableFuture.complete(null),
DELAY_MILLIS,
TimeUnit.MILLISECONDS);
getMainThreadExecutor()
.schedule(
() -> {
scheduleCallableFuture.complete(null);
return null;
},
DELAY_MILLIS,
TimeUnit.MILLISECONDS);
getMainThreadExecutor().execute(() -> executeFuture.complete(null));
}
}
}
|
class AkkaRpcActorTest extends TestLogger {
private static final Logger LOG = LoggerFactory.getLogger(AkkaRpcActorTest.class);
private static Time timeout = Time.milliseconds(10000L);
private static AkkaRpcService akkaRpcService;
@BeforeClass
public static void setup() {
akkaRpcService = new TestingRpcService();
}
@AfterClass
public static void shutdown()
throws InterruptedException, ExecutionException, TimeoutException {
RpcUtils.terminateRpcService(akkaRpcService, timeout);
}
/**
* Tests that the rpc endpoint and the associated rpc gateway have the same addresses.
*
* @throws Exception
*/
@Test
public void testAddressResolution() throws Exception {
DummyRpcEndpoint rpcEndpoint = new DummyRpcEndpoint(akkaRpcService);
CompletableFuture<DummyRpcGateway> futureRpcGateway =
akkaRpcService.connect(rpcEndpoint.getAddress(), DummyRpcGateway.class);
DummyRpcGateway rpcGateway = futureRpcGateway.get(timeout.getSize(), timeout.getUnit());
assertEquals(rpcEndpoint.getAddress(), rpcGateway.getAddress());
}
/**
* Tests that a {@link RpcConnectionException} is thrown if the rpc endpoint cannot be connected
* to.
*/
@Test
public void testFailingAddressResolution() throws Exception {
CompletableFuture<DummyRpcGateway> futureRpcGateway =
akkaRpcService.connect("foobar", DummyRpcGateway.class);
try {
futureRpcGateway.get(timeout.getSize(), timeout.getUnit());
fail("The rpc connection resolution should have failed.");
} catch (ExecutionException exception) {
assertTrue(exception.getCause() instanceof RpcConnectionException);
}
}
/**
* Tests that the {@link AkkaRpcActor} discards messages until the corresponding {@link
* RpcEndpoint} has been started.
*/
@Test
public void testMessageDiscarding() throws Exception {
int expectedValue = 1337;
DummyRpcEndpoint rpcEndpoint = new DummyRpcEndpoint(akkaRpcService);
DummyRpcGateway rpcGateway = rpcEndpoint.getSelfGateway(DummyRpcGateway.class);
CompletableFuture<Integer> result = rpcGateway.foobar();
try {
result.get(timeout.getSize(), timeout.getUnit());
fail("Expected an AkkaRpcException.");
} catch (ExecutionException ee) {
assertTrue(ee.getCause() instanceof AkkaRpcException);
}
rpcEndpoint.setFoobar(expectedValue);
rpcEndpoint.start();
try {
result = rpcGateway.foobar();
Integer actualValue = result.get(timeout.getSize(), timeout.getUnit());
assertThat(
"The new foobar value should have been returned.",
actualValue,
Is.is(expectedValue));
} finally {
RpcUtils.terminateRpcEndpoint(rpcEndpoint, timeout);
}
}
/**
* Tests that we can wait for a RpcEndpoint to terminate.
*
* @throws ExecutionException
* @throws InterruptedException
*/
@Test(timeout = 5000)
public void testRpcEndpointTerminationFuture() throws Exception {
final DummyRpcEndpoint rpcEndpoint = new DummyRpcEndpoint(akkaRpcService);
rpcEndpoint.start();
CompletableFuture<Void> terminationFuture = rpcEndpoint.getTerminationFuture();
assertFalse(terminationFuture.isDone());
CompletableFuture.runAsync(rpcEndpoint::closeAsync, akkaRpcService.getExecutor());
terminationFuture.get();
}
@Test
public void testExceptionPropagation() throws Exception {
ExceptionalEndpoint rpcEndpoint = new ExceptionalEndpoint(akkaRpcService);
rpcEndpoint.start();
ExceptionalGateway rpcGateway = rpcEndpoint.getSelfGateway(ExceptionalGateway.class);
CompletableFuture<Integer> result = rpcGateway.doStuff();
try {
result.get(timeout.getSize(), timeout.getUnit());
fail("this should fail with an exception");
} catch (ExecutionException e) {
Throwable cause = e.getCause();
assertEquals(RuntimeException.class, cause.getClass());
assertEquals("my super specific test exception", cause.getMessage());
}
}
@Test
public void testExceptionPropagationFuturePiping() throws Exception {
ExceptionalFutureEndpoint rpcEndpoint = new ExceptionalFutureEndpoint(akkaRpcService);
rpcEndpoint.start();
ExceptionalGateway rpcGateway = rpcEndpoint.getSelfGateway(ExceptionalGateway.class);
CompletableFuture<Integer> result = rpcGateway.doStuff();
try {
result.get(timeout.getSize(), timeout.getUnit());
fail("this should fail with an exception");
} catch (ExecutionException e) {
Throwable cause = e.getCause();
assertEquals(Exception.class, cause.getClass());
assertEquals("some test", cause.getMessage());
}
}
/** Tests that exception thrown in the onStop method are returned by the termination future. */
@Test
public void testOnStopExceptionPropagation() throws Exception {
FailingOnStopEndpoint rpcEndpoint =
new FailingOnStopEndpoint(akkaRpcService, "FailingOnStopEndpoint");
rpcEndpoint.start();
CompletableFuture<Void> terminationFuture = rpcEndpoint.closeAsync();
try {
terminationFuture.get();
} catch (ExecutionException e) {
assertTrue(e.getCause() instanceof FailingOnStopEndpoint.OnStopException);
}
}
/** Checks that the onStop callback is executed within the main thread. */
@Test
public void testOnStopExecutedByMainThread() throws Exception {
SimpleRpcEndpoint simpleRpcEndpoint =
new SimpleRpcEndpoint(akkaRpcService, "SimpleRpcEndpoint");
simpleRpcEndpoint.start();
CompletableFuture<Void> terminationFuture = simpleRpcEndpoint.closeAsync();
terminationFuture.get();
}
/** Tests that actors are properly terminated when the AkkaRpcService is shut down. */
@Test
public void testActorTerminationWhenServiceShutdown() throws Exception {
final ActorSystem rpcActorSystem = AkkaUtils.createDefaultActorSystem();
final RpcService rpcService =
new AkkaRpcService(
rpcActorSystem, AkkaRpcServiceConfiguration.defaultConfiguration());
try {
SimpleRpcEndpoint rpcEndpoint =
new SimpleRpcEndpoint(rpcService, SimpleRpcEndpoint.class.getSimpleName());
rpcEndpoint.start();
CompletableFuture<Void> terminationFuture = rpcEndpoint.getTerminationFuture();
rpcService.stopService();
terminationFuture.get(timeout.toMilliseconds(), TimeUnit.MILLISECONDS);
} finally {
rpcActorSystem.terminate();
FutureUtils.toJava(rpcActorSystem.whenTerminated())
.get(timeout.getSize(), timeout.getUnit());
}
}
/**
* Tests that the {@link AkkaRpcActor} only completes after the asynchronous post stop action
* has completed.
*/
@Test
public void testActorTerminationWithAsynchronousOnStopAction() throws Exception {
final CompletableFuture<Void> onStopFuture = new CompletableFuture<>();
final AsynchronousOnStopEndpoint endpoint =
new AsynchronousOnStopEndpoint(akkaRpcService, onStopFuture);
try {
endpoint.start();
final CompletableFuture<Void> terminationFuture = endpoint.closeAsync();
assertFalse(terminationFuture.isDone());
onStopFuture.complete(null);
terminationFuture.get();
} finally {
RpcUtils.terminateRpcEndpoint(endpoint, timeout);
}
}
/**
* Tests that we can still run commands via the main thread executor when the onStop method is
* called.
*/
@Test
public void testMainThreadExecutionOnStop() throws Exception {
final MainThreadExecutorOnStopEndpoint endpoint =
new MainThreadExecutorOnStopEndpoint(akkaRpcService);
try {
endpoint.start();
CompletableFuture<Void> terminationFuture = endpoint.closeAsync();
terminationFuture.get();
} finally {
RpcUtils.terminateRpcEndpoint(endpoint, timeout);
}
}
/** Tests that when the onStop future completes that no other messages will be processed. */
@Test
public void testOnStopFutureCompletionDirectlyTerminatesAkkaRpcActor() throws Exception {
final CompletableFuture<Void> onStopFuture = new CompletableFuture<>();
final TerminatingAfterOnStopFutureCompletionEndpoint endpoint =
new TerminatingAfterOnStopFutureCompletionEndpoint(akkaRpcService, onStopFuture);
try {
endpoint.start();
final AsyncOperationGateway asyncOperationGateway =
endpoint.getSelfGateway(AsyncOperationGateway.class);
final CompletableFuture<Void> terminationFuture = endpoint.closeAsync();
assertThat(terminationFuture.isDone(), is(false));
final CompletableFuture<Integer> firstAsyncOperationFuture =
asyncOperationGateway.asyncOperation(timeout);
final CompletableFuture<Integer> secondAsyncOperationFuture =
asyncOperationGateway.asyncOperation(timeout);
endpoint.awaitEnterAsyncOperation();
onStopFuture.complete(null);
assertThat(terminationFuture.isDone(), is(false));
endpoint.triggerUnblockAsyncOperation();
assertThat(firstAsyncOperationFuture.get(), is(42));
terminationFuture.get();
assertThat(endpoint.getNumberAsyncOperationCalls(), is(1));
assertThat(secondAsyncOperationFuture.isDone(), is(false));
} finally {
RpcUtils.terminateRpcEndpoint(endpoint, timeout);
}
}
/**
     * Tests that the {@link RpcEndpoint#onStart()} method is called when the rpc endpoint is
     * started.
*/
@Test
public void testOnStartIsCalledWhenRpcEndpointStarts() throws Exception {
final OnStartEndpoint onStartEndpoint = new OnStartEndpoint(akkaRpcService, null);
try {
onStartEndpoint.start();
onStartEndpoint.awaitUntilOnStartCalled();
} finally {
RpcUtils.terminateRpcEndpoint(onStartEndpoint, timeout);
}
}
/** Tests that if onStart fails, then the endpoint terminates. */
@Test
public void testOnStartFails() throws Exception {
final FlinkException testException = new FlinkException("Test exception");
final OnStartEndpoint onStartEndpoint = new OnStartEndpoint(akkaRpcService, testException);
onStartEndpoint.start();
onStartEndpoint.awaitUntilOnStartCalled();
try {
onStartEndpoint.getTerminationFuture().get();
fail("Expected that the rpc endpoint failed onStart and thus has terminated.");
} catch (ExecutionException ee) {
assertThat(
ExceptionUtils.findThrowable(ee, exception -> exception.equals(testException))
.isPresent(),
is(true));
}
}
/**
* Tests that multiple termination calls won't trigger the onStop action multiple times. Note
* that this test is a probabilistic test which only fails sometimes without the fix. See
* FLINK-16703.
*/
@Test
public void callsOnStopOnlyOnce() throws Exception {
final CompletableFuture<Void> onStopFuture = new CompletableFuture<>();
final OnStopCountingRpcEndpoint endpoint =
new OnStopCountingRpcEndpoint(akkaRpcService, onStopFuture);
try {
endpoint.start();
final AkkaBasedEndpoint selfGateway = endpoint.getSelfGateway(AkkaBasedEndpoint.class);
selfGateway.getActorRef().tell(ControlMessages.TERMINATE, ActorRef.noSender());
selfGateway.getActorRef().tell(ControlMessages.TERMINATE, ActorRef.noSender());
endpoint.waitUntilOnStopHasBeenCalled();
onStopFuture.complete(null);
endpoint.getTerminationFuture().get();
assertThat(endpoint.getNumOnStopCalls(), is(1));
} finally {
onStopFuture.complete(null);
RpcUtils.terminateRpcEndpoint(endpoint, timeout);
}
}
@Test
public void canReuseEndpointNameAfterTermination() throws Exception {
final String endpointName = "not_unique";
try (SimpleRpcEndpoint simpleRpcEndpoint1 =
new SimpleRpcEndpoint(akkaRpcService, endpointName)) {
simpleRpcEndpoint1.start();
simpleRpcEndpoint1.closeAsync().join();
try (SimpleRpcEndpoint simpleRpcEndpoint2 =
new SimpleRpcEndpoint(akkaRpcService, endpointName)) {
simpleRpcEndpoint2.start();
assertThat(
simpleRpcEndpoint2.getAddress(),
is(equalTo(simpleRpcEndpoint1.getAddress())));
}
}
}
@Test
public void terminationFutureDoesNotBlockRpcEndpointCreation() throws Exception {
try (final SimpleRpcEndpoint simpleRpcEndpoint =
new SimpleRpcEndpoint(akkaRpcService, "foobar")) {
final CompletableFuture<Void> terminationFuture =
simpleRpcEndpoint.getTerminationFuture();
final CompletableFuture<SimpleRpcEndpoint> foobar2 =
terminationFuture.thenApply(
ignored -> new SimpleRpcEndpoint(akkaRpcService, "foobar2"));
simpleRpcEndpoint.closeAsync();
final SimpleRpcEndpoint simpleRpcEndpoint2 = foobar2.join();
simpleRpcEndpoint2.close();
}
}
@Test
public void resolvesRunningAkkaRpcActor() throws Exception {
final String endpointName = "foobar";
try (RpcEndpoint simpleRpcEndpoint1 = createRpcEndpointWithRandomNameSuffix(endpointName);
RpcEndpoint simpleRpcEndpoint2 =
createRpcEndpointWithRandomNameSuffix(endpointName)) {
simpleRpcEndpoint1.closeAsync().join();
final String wildcardName = AkkaRpcServiceUtils.createWildcardName(endpointName);
final String wildcardAddress = AkkaRpcServiceUtils.getLocalRpcUrl(wildcardName);
final RpcGateway rpcGateway =
akkaRpcService.connect(wildcardAddress, RpcGateway.class).join();
assertThat(rpcGateway.getAddress(), is(equalTo(simpleRpcEndpoint2.getAddress())));
}
}
private RpcEndpoint createRpcEndpointWithRandomNameSuffix(String prefix) {
return new SimpleRpcEndpoint(akkaRpcService, AkkaRpcServiceUtils.createRandomName(prefix));
}
@Test
public void canRespondWithNullValueLocally() throws Exception {
try (final NullRespondingEndpoint nullRespondingEndpoint =
new NullRespondingEndpoint(akkaRpcService)) {
nullRespondingEndpoint.start();
final NullRespondingGateway selfGateway =
nullRespondingEndpoint.getSelfGateway(NullRespondingGateway.class);
final CompletableFuture<Integer> nullValuedResponseFuture = selfGateway.foobar();
assertThat(nullValuedResponseFuture.join(), is(nullValue()));
}
}
@Test
public void canRespondWithSynchronousNullValueLocally() throws Exception {
try (final NullRespondingEndpoint nullRespondingEndpoint =
new NullRespondingEndpoint(akkaRpcService)) {
nullRespondingEndpoint.start();
final NullRespondingGateway selfGateway =
nullRespondingEndpoint.getSelfGateway(NullRespondingGateway.class);
final Integer value = selfGateway.synchronousFoobar();
assertThat(value, is(nullValue()));
}
}
@Test
public void canRespondWithSerializedValueLocally() throws Exception {
try (final SerializedValueRespondingEndpoint endpoint =
new SerializedValueRespondingEndpoint(akkaRpcService)) {
endpoint.start();
final SerializedValueRespondingGateway selfGateway =
endpoint.getSelfGateway(SerializedValueRespondingGateway.class);
assertThat(
selfGateway.getSerializedValueSynchronously(),
equalTo(SerializedValueRespondingEndpoint.SERIALIZED_VALUE));
final CompletableFuture<SerializedValue<String>> responseFuture =
selfGateway.getSerializedValue();
assertThat(
responseFuture.get(),
equalTo(SerializedValueRespondingEndpoint.SERIALIZED_VALUE));
}
}
/**
* Verifies that actions scheduled via the main thread executor are eventually run while
* adhering to the provided delays.
*
* <p>This test does not assert any upper bounds for how late something is run, because that
* would make the test unstable in some environments, and there is no guarantee that such an
* upper bound exists in the first place.
*
* <p>There are various failure points for this test, including the scheduling from the {@link
* RpcEndpoint} to the {@link AkkaInvocationHandler}, the conversion of these calls by the
* handler into Call-/RunAsync messages, the handling of said messages by the {@link
* AkkaRpcActor} and in the case of RunAsync the actual scheduling by the underlying actor
* system. This isn't an ideal test setup, but these components are difficult to test in
* isolation.
*/
@Test
interface DummyRpcGateway extends RpcGateway {
CompletableFuture<Integer> foobar();
}
static class DummyRpcEndpoint extends RpcEndpoint implements DummyRpcGateway {
private volatile int foobar = 42;
protected DummyRpcEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Integer> foobar() {
return CompletableFuture.completedFuture(foobar);
}
public void setFoobar(int value) {
foobar = value;
}
}
interface NullRespondingGateway extends DummyRpcGateway {
Integer synchronousFoobar();
}
static class NullRespondingEndpoint extends RpcEndpoint implements NullRespondingGateway {
protected NullRespondingEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Integer> foobar() {
return CompletableFuture.completedFuture(null);
}
@Override
public Integer synchronousFoobar() {
return null;
}
}
interface SerializedValueRespondingGateway extends RpcGateway {
CompletableFuture<SerializedValue<String>> getSerializedValue();
SerializedValue<String> getSerializedValueSynchronously();
}
static class SerializedValueRespondingEndpoint extends RpcEndpoint
implements SerializedValueRespondingGateway {
static final SerializedValue<String> SERIALIZED_VALUE;
static {
try {
SERIALIZED_VALUE = new SerializedValue<>("string-value");
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public SerializedValueRespondingEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<SerializedValue<String>> getSerializedValue() {
return CompletableFuture.completedFuture(SERIALIZED_VALUE);
}
@Override
public SerializedValue<String> getSerializedValueSynchronously() {
return SERIALIZED_VALUE;
}
}
private interface ExceptionalGateway extends RpcGateway {
CompletableFuture<Integer> doStuff();
}
private static class ExceptionalEndpoint extends RpcEndpoint implements ExceptionalGateway {
protected ExceptionalEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Integer> doStuff() {
throw new RuntimeException("my super specific test exception");
}
}
private static class ExceptionalFutureEndpoint extends RpcEndpoint
implements ExceptionalGateway {
protected ExceptionalFutureEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Integer> doStuff() {
final CompletableFuture<Integer> future = new CompletableFuture<>();
new Thread() {
@Override
public void run() {
try {
Thread.sleep(10);
} catch (InterruptedException ignored) {
}
future.completeExceptionally(new Exception("some test"));
}
}.start();
return future;
}
}
private static class SimpleRpcEndpoint extends RpcEndpoint implements RpcGateway {
protected SimpleRpcEndpoint(RpcService rpcService, String endpointId) {
super(rpcService, endpointId);
}
}
private static class FailingOnStopEndpoint extends RpcEndpoint implements RpcGateway {
protected FailingOnStopEndpoint(RpcService rpcService, String endpointId) {
super(rpcService, endpointId);
}
@Override
public CompletableFuture<Void> onStop() {
return FutureUtils.completedExceptionally(new OnStopException("Test exception."));
}
private static class OnStopException extends FlinkException {
private static final long serialVersionUID = 6701096588415871592L;
public OnStopException(String message) {
super(message);
}
}
}
static class AsynchronousOnStopEndpoint extends RpcEndpoint {
private final CompletableFuture<Void> onStopFuture;
protected AsynchronousOnStopEndpoint(
RpcService rpcService, CompletableFuture<Void> onStopFuture) {
super(rpcService);
this.onStopFuture = Preconditions.checkNotNull(onStopFuture);
}
@Override
public CompletableFuture<Void> onStop() {
return onStopFuture;
}
}
private static class MainThreadExecutorOnStopEndpoint extends RpcEndpoint {
protected MainThreadExecutorOnStopEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public CompletableFuture<Void> onStop() {
return CompletableFuture.runAsync(() -> {}, getMainThreadExecutor());
}
}
interface AsyncOperationGateway extends RpcGateway {
CompletableFuture<Integer> asyncOperation(@RpcTimeout Time timeout);
}
private static class TerminatingAfterOnStopFutureCompletionEndpoint extends RpcEndpoint
implements AsyncOperationGateway {
private final CompletableFuture<Void> onStopFuture;
private final OneShotLatch blockAsyncOperation = new OneShotLatch();
private final OneShotLatch enterAsyncOperation = new OneShotLatch();
private final AtomicInteger asyncOperationCounter = new AtomicInteger(0);
protected TerminatingAfterOnStopFutureCompletionEndpoint(
RpcService rpcService, CompletableFuture<Void> onStopFuture) {
super(rpcService);
this.onStopFuture = onStopFuture;
}
@Override
public CompletableFuture<Integer> asyncOperation(Time timeout) {
asyncOperationCounter.incrementAndGet();
enterAsyncOperation.trigger();
try {
blockAsyncOperation.await();
} catch (InterruptedException e) {
throw new FlinkRuntimeException(e);
}
return CompletableFuture.completedFuture(42);
}
@Override
public CompletableFuture<Void> onStop() {
return onStopFuture;
}
void awaitEnterAsyncOperation() throws InterruptedException {
enterAsyncOperation.await();
}
void triggerUnblockAsyncOperation() {
blockAsyncOperation.trigger();
}
int getNumberAsyncOperationCalls() {
return asyncOperationCounter.get();
}
}
private static final class OnStartEndpoint extends RpcEndpoint {
private final CountDownLatch countDownLatch;
@Nullable private final Exception exception;
OnStartEndpoint(RpcService rpcService, @Nullable Exception exception) {
super(rpcService);
this.countDownLatch = new CountDownLatch(1);
this.exception = exception;
getTerminationFuture().whenComplete((aVoid, throwable) -> closeAsync());
}
@Override
public void onStart() throws Exception {
countDownLatch.countDown();
ExceptionUtils.tryRethrowException(exception);
}
public void awaitUntilOnStartCalled() throws InterruptedException {
countDownLatch.await();
}
}
private static final class OnStopCountingRpcEndpoint extends RpcEndpoint {
private final AtomicInteger numOnStopCalls = new AtomicInteger(0);
private final OneShotLatch onStopHasBeenCalled = new OneShotLatch();
private final CompletableFuture<Void> onStopFuture;
private OnStopCountingRpcEndpoint(
RpcService rpcService, CompletableFuture<Void> onStopFuture) {
super(rpcService);
this.onStopFuture = onStopFuture;
}
@Override
protected CompletableFuture<Void> onStop() {
onStopHasBeenCalled.trigger();
numOnStopCalls.incrementAndGet();
return onStopFuture;
}
private int getNumOnStopCalls() {
return numOnStopCalls.get();
}
private void waitUntilOnStopHasBeenCalled() throws InterruptedException {
onStopHasBeenCalled.await();
}
}
interface SchedulingRpcEndpointGateway extends RpcGateway {
void schedule(
final CompletableFuture<Void> scheduleRunnableFuture,
final CompletableFuture<Void> scheduleCallableFuture,
final CompletableFuture<Void> executeFuture);
}
private static final class SchedulingRpcEndpoint extends RpcEndpoint
implements SchedulingRpcEndpointGateway {
static final int DELAY_MILLIS = 20;
public SchedulingRpcEndpoint(RpcService rpcService) {
super(rpcService);
}
@Override
public void schedule(
final CompletableFuture<Void> scheduleRunnableFuture,
final CompletableFuture<Void> scheduleCallableFuture,
final CompletableFuture<Void> executeFuture) {
getMainThreadExecutor()
.schedule(
() -> scheduleRunnableFuture.complete(null),
DELAY_MILLIS,
TimeUnit.MILLISECONDS);
getMainThreadExecutor()
.schedule(
() -> {
scheduleCallableFuture.complete(null);
return null;
},
DELAY_MILLIS,
TimeUnit.MILLISECONDS);
getMainThreadExecutor().execute(() -> executeFuture.complete(null));
}
}
}
|
BTW, just checked: on recent JVMs the HashMap implementation is different, and it looks like this is no longer a problem. Might be time to simplify Validator as well :)
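For context, the classic sizing idiom being referred to, as a hedged sketch (assumes the default 0.75 load factor; `newMapWithExpectedSize` is an illustrative name, not an existing API here):

import java.util.HashMap;
import java.util.Map;

final class MapSizing {
    // A HashMap resizes once size > capacity * loadFactor, so passing the
    // expected element count directly as the initial capacity can still
    // trigger one rehash. Dividing by the load factor avoids that.
    static <K, V> Map<K, V> newMapWithExpectedSize(int expectedSize) {
        return new HashMap<>((int) Math.ceil(expectedSize / 0.75d));
    }
}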
|
public FieldsHelper(final Class<?> aClass) {
final Field[] declaredFields = aClass.getDeclaredFields();
this.fields = new HashMap<>(declaredFields.length);
for (Field field : declaredFields) {
this.fields.put(field.getName(), field);
}
}
|
this.fields = new HashMap<>(declaredFields.length);
|
public FieldsHelper(final Class<?> aClass) {
final Field[] declaredFields = aClass.getDeclaredFields();
this.fields = new HashMap<>(declaredFields.length);
for (Field field : declaredFields) {
this.fields.put(field.getName(), field);
}
}
|
class FieldsHelper {
private final Map<String, Field> fields;
public Field getDeclaredField(final String name) {
return fields.get(name);
}
}
|
class FieldsHelper {
private final Map<String, Field> fields;
public Field getDeclaredField(final String name) {
return fields.get(name);
}
}
|
I'm not quite familiar with the logic here, but these QueryHeader constructions look like magic-number style. Would it be better to define some constant QueryHeader, like `public final QueryHeader XXX_QUERY_HEADER=new QueryHeader(schemaName, "", "name", "name", Types.CHAR, "CHAR", 255, 0, false, false, false, false)`? Otherwise, after 3 months it will be hard to understand the logic here, even for yourself.
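For illustration, one way to name the repeated arguments once — a sketch only; `buildCharHeader` is a hypothetical helper, and the trailing numeric and boolean arguments simply mirror the original call sites verbatim:

private QueryHeader buildCharHeader(final String schemaName, final String columnName) {
    // CHAR(255) column header; the flags are copied unchanged from the original calls.
    return new QueryHeader(schemaName, "", columnName, columnName, Types.CHAR, "CHAR", 255, 0, false, false, false, false);
}

private List<QueryHeader> getQueryHeader(final String schemaName) {
    List<QueryHeader> result = new LinkedList<>();
    result.add(buildCharHeader(schemaName, "name"));
    result.add(buildCharHeader(schemaName, "dataSourceNames"));
    result.add(buildCharHeader(schemaName, "discoverType"));
    result.add(buildCharHeader(schemaName, "discoverProps"));
    return result;
}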
|
private List<QueryHeader> getQueryHeader(final String schemaName) {
List<QueryHeader> result = new LinkedList<>();
result.add(new QueryHeader(schemaName, "", "name", "name", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
result.add(new QueryHeader(schemaName, "", "dataSourceNames", "dataSourceNames", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
result.add(new QueryHeader(schemaName, "", "discoverType", "discoverType", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
result.add(new QueryHeader(schemaName, "", "discoverProps", "discoverProps", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
return result;
}
|
result.add(new QueryHeader(schemaName, "", "name", "name", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
|
private List<QueryHeader> getQueryHeader(final String schemaName) {
List<QueryHeader> result = new LinkedList<>();
result.add(new QueryHeader(schemaName, "", "name", "name", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
result.add(new QueryHeader(schemaName, "", "dataSourceNames", "dataSourceNames", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
result.add(new QueryHeader(schemaName, "", "discoverType", "discoverType", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
result.add(new QueryHeader(schemaName, "", "discoverProps", "discoverProps", Types.CHAR, "CHAR", 255, 0, false, false, false, false));
return result;
}
|
class DatabaseDiscoveryRulesQueryBackendHandler extends SchemaRequiredBackendHandler<ShowDatabaseDiscoveryRulesStatement> {
private Iterator<DatabaseDiscoveryDataSourceRuleConfiguration> data;
private Map<String, ShardingSphereAlgorithmConfiguration> discoverTypes;
public DatabaseDiscoveryRulesQueryBackendHandler(final ShowDatabaseDiscoveryRulesStatement sqlStatement, final BackendConnection backendConnection) {
super(sqlStatement, backendConnection);
}
@Override
protected ResponseHeader execute(final String schemaName, final ShowDatabaseDiscoveryRulesStatement sqlStatement) {
loadRuleConfiguration(schemaName);
return new QueryResponseHeader(getQueryHeader(schemaName));
}
private void loadRuleConfiguration(final String schemaName) {
Optional<DatabaseDiscoveryRuleConfiguration> ruleConfig = ProxyContext.getInstance().getMetaData(schemaName).getRuleMetaData().getConfigurations()
.stream().filter(each -> each instanceof DatabaseDiscoveryRuleConfiguration).map(each -> (DatabaseDiscoveryRuleConfiguration) each).findAny();
data = ruleConfig.map(optional -> optional.getDataSources().iterator()).orElse(Collections.emptyIterator());
discoverTypes = ruleConfig.map(DatabaseDiscoveryRuleConfiguration::getDiscoveryTypes).orElse(Maps.newHashMap());
}
@Override
public boolean next() {
return data.hasNext();
}
@Override
public Collection<Object> getRowData() {
DatabaseDiscoveryDataSourceRuleConfiguration ruleConfig = data.next();
return Arrays.asList(ruleConfig.getName(), ruleConfig.getDataSourceNames(),
discoverTypes.get(ruleConfig.getDiscoveryTypeName()).getType(),
discoverTypes.get(ruleConfig.getDiscoveryTypeName()).getProps());
}
}
|
class DatabaseDiscoveryRulesQueryBackendHandler extends SchemaRequiredBackendHandler<ShowDatabaseDiscoveryRulesStatement> {
private Iterator<DatabaseDiscoveryDataSourceRuleConfiguration> data;
private Map<String, ShardingSphereAlgorithmConfiguration> discoverTypes;
public DatabaseDiscoveryRulesQueryBackendHandler(final ShowDatabaseDiscoveryRulesStatement sqlStatement, final BackendConnection backendConnection) {
super(sqlStatement, backendConnection);
}
@Override
protected ResponseHeader execute(final String schemaName, final ShowDatabaseDiscoveryRulesStatement sqlStatement) {
loadRuleConfiguration(schemaName);
return new QueryResponseHeader(getQueryHeader(schemaName));
}
private void loadRuleConfiguration(final String schemaName) {
Optional<DatabaseDiscoveryRuleConfiguration> ruleConfig = ProxyContext.getInstance().getMetaData(schemaName).getRuleMetaData().getConfigurations()
.stream().filter(each -> each instanceof DatabaseDiscoveryRuleConfiguration).map(each -> (DatabaseDiscoveryRuleConfiguration) each).findAny();
data = ruleConfig.map(optional -> optional.getDataSources().iterator()).orElse(Collections.emptyIterator());
discoverTypes = ruleConfig.map(DatabaseDiscoveryRuleConfiguration::getDiscoveryTypes).orElse(Maps.newHashMap());
}
@Override
public boolean next() {
return data.hasNext();
}
@Override
public Collection<Object> getRowData() {
DatabaseDiscoveryDataSourceRuleConfiguration ruleConfig = data.next();
return Arrays.asList(ruleConfig.getName(), ruleConfig.getDataSourceNames(),
discoverTypes.get(ruleConfig.getDiscoveryTypeName()).getType(),
discoverTypes.get(ruleConfig.getDiscoveryTypeName()).getProps());
}
}
|
We also need to override `RestOptions.BIND_PORT` and `BlobServerOptions.PORT`.
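Concretely, the host-network branch would then gain lines like the following (a sketch; both option constants are string-typed in Flink so they accept port ranges):

```java
configuration.setString(RestOptions.BIND_PORT, "0");
configuration.setString(BlobServerOptions.PORT, "0");
```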
|
static Configuration loadConfiguration(Configuration dynamicParameters) {
final String configDir = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR);
Preconditions.checkNotNull(
configDir,
"Flink configuration directory (%s) in environment should not be null!",
ConfigConstants.ENV_FLINK_CONF_DIR);
final Configuration configuration =
GlobalConfiguration.loadConfiguration(configDir, dynamicParameters);
if (HighAvailabilityMode.isHighAvailabilityModeActivated(configuration)) {
if (KubernetesUtils.isHostNetwork(configuration)) {
configuration.setString(HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE, "0");
configuration.setInteger(JobManagerOptions.PORT, 0);
} else {
final String ipAddress = System.getenv().get(Constants.ENV_FLINK_POD_IP_ADDRESS);
Preconditions.checkState(
ipAddress != null,
"JobManager ip address environment variable %s not set",
Constants.ENV_FLINK_POD_IP_ADDRESS);
configuration.setString(JobManagerOptions.ADDRESS, ipAddress);
configuration.setString(RestOptions.ADDRESS, ipAddress);
}
}
return configuration;
}
|
if (KubernetesUtils.isHostNetwork(configuration)) {
|
static Configuration loadConfiguration(Configuration dynamicParameters) {
final String configDir = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR);
Preconditions.checkNotNull(
configDir,
"Flink configuration directory (%s) in environment should not be null!",
ConfigConstants.ENV_FLINK_CONF_DIR);
final Configuration configuration =
GlobalConfiguration.loadConfiguration(configDir, dynamicParameters);
if (KubernetesUtils.isHostNetwork(configuration)) {
configuration.setString(RestOptions.BIND_PORT, "0");
configuration.setInteger(JobManagerOptions.PORT, 0);
configuration.setString(BlobServerOptions.PORT, "0");
configuration.setString(HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE, "0");
configuration.setString(TaskManagerOptions.RPC_PORT, "0");
}
if (HighAvailabilityMode.isHighAvailabilityModeActivated(configuration)) {
final String ipAddress = System.getenv().get(Constants.ENV_FLINK_POD_IP_ADDRESS);
Preconditions.checkState(
ipAddress != null,
"JobManager ip address environment variable %s not set",
Constants.ENV_FLINK_POD_IP_ADDRESS);
configuration.setString(JobManagerOptions.ADDRESS, ipAddress);
configuration.setString(RestOptions.ADDRESS, ipAddress);
}
return configuration;
}
|
class KubernetesEntrypointUtils {
private static final Logger LOG = LoggerFactory.getLogger(KubernetesEntrypointUtils.class);
    /**
     * For a non-HA cluster, {@link JobManagerOptions#ADDRESS} is set to the Kubernetes service
     * address on the client side (see {@link KubernetesClusterDescriptor}), so the
     * TaskManager will use the service address to contact the JobManager. For an HA cluster, {@link
     * JobManagerOptions#ADDRESS} is resolved at runtime: the TaskManager uses ZooKeeper
     * or another high-availability service to find the address of the JobManager.
     *
     * @return Updated configuration
     */
private KubernetesEntrypointUtils() {}
}
|
class KubernetesEntrypointUtils {
private static final Logger LOG = LoggerFactory.getLogger(KubernetesEntrypointUtils.class);
    /**
     * For a non-HA cluster, {@link JobManagerOptions#ADDRESS} is set to the Kubernetes service
     * address on the client side (see {@link KubernetesClusterDescriptor}), so the
     * TaskManager will use the service address to contact the JobManager. For an HA cluster, {@link
     * JobManagerOptions#ADDRESS} is resolved at runtime: the TaskManager uses ZooKeeper
     * or another high-availability service to find the address of the JobManager.
     *
     * @return Updated configuration
     */
private KubernetesEntrypointUtils() {}
}
|
We should probably log this as an error. If we could make breaking changes, I would fail submission here. In general, it's bad for a user to specify a configuration option that doesn't actually do what they expect.
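If breaking changes were on the table, failing fast might look like this sketch (using the same `checkArgument` style as the surrounding validation):

```java
checkArgument(
    getWriteDisposition() != WriteDisposition.WRITE_TRUNCATE,
    "The Storage API sink does not support the WRITE_TRUNCATE write disposition.");
```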
|
public WriteResult expand(PCollection<T> input) {
checkArgument(
getTableFunction() != null
|| getJsonTableRef() != null
|| getDynamicDestinations() != null,
"must set the table reference of a BigQueryIO.Write transform");
List<?> allToArgs =
Lists.newArrayList(getJsonTableRef(), getTableFunction(), getDynamicDestinations());
checkArgument(
1
== Iterables.size(
allToArgs.stream()
.filter(Predicates.notNull()::apply)
.collect(Collectors.toList())),
"Exactly one of jsonTableRef, tableFunction, or dynamicDestinations must be set");
List<?> allSchemaArgs =
Lists.newArrayList(getJsonSchema(), getSchemaFromView(), getDynamicDestinations());
checkArgument(
2
> Iterables.size(
allSchemaArgs.stream()
.filter(Predicates.notNull()::apply)
.collect(Collectors.toList())),
"No more than one of jsonSchema, schemaFromView, or dynamicDestinations may be set");
BigQueryOptions bqOptions = input.getPipeline().getOptions().as(BigQueryOptions.class);
Write.Method method = resolveMethod(input);
if (input.isBounded() == IsBounded.UNBOUNDED) {
if (method == Write.Method.FILE_LOADS || method == Write.Method.STORAGE_WRITE_API) {
Duration triggeringFrequency =
(method == Write.Method.STORAGE_WRITE_API)
? getStorageApiTriggeringFrequency(bqOptions)
: getTriggeringFrequency();
checkArgument(
triggeringFrequency != null,
"When writing an unbounded PCollection via FILE_LOADS or STORAGE_WRITE_API, "
+ "triggering frequency must be specified");
} else {
checkArgument(
getTriggeringFrequency() == null,
"Triggering frequency can be specified only when writing via FILE_LOADS or STORAGE_WRITE_API, but the method was %s.",
method);
}
if (method != Method.FILE_LOADS) {
checkArgument(
getNumFileShards() == 0,
"Number of file shards can be specified only when writing via FILE_LOADS, but the method was %s.",
method);
}
if (method == Method.STORAGE_API_AT_LEAST_ONCE
&& getStorageApiTriggeringFrequency(bqOptions) != null) {
LOG.warn(
"Storage API triggering frequency option will be ignored is it can only be specified only "
+ "when writing via STORAGE_WRITE_API, but the method was {}.",
method);
}
if (getAutoSharding()) {
if (method == Method.STORAGE_WRITE_API && getStorageApiNumStreams(bqOptions) > 0) {
LOG.warn(
"Both numStorageWriteApiStreams and auto-sharding options are set. Will default to auto-sharding."
+ " To set a fixed number of streams, do not enable auto-sharding.");
} else if (method == Method.FILE_LOADS && getNumFileShards() > 0) {
LOG.warn(
"Both numFileShards and auto-sharding options are set. Will default to auto-sharding."
+ " To set a fixed number of file shards, do not enable auto-sharding.");
} else if (method == Method.STORAGE_API_AT_LEAST_ONCE) {
LOG.warn(
"The setting of auto-sharding is ignored. It is only supported when writing an"
+ " unbounded PCollection via FILE_LOADS, STREAMING_INSERTS or"
+ " STORAGE_WRITE_API, but the method was {}.",
method);
}
}
} else {
String error =
String.format(
" is only applicable to an unbounded PCollection, but the input PCollection is %s.",
input.isBounded());
checkArgument(getTriggeringFrequency() == null, "Triggering frequency" + error);
checkArgument(!getAutoSharding(), "Auto-sharding" + error);
checkArgument(getNumFileShards() == 0, "Number of file shards" + error);
if (getStorageApiTriggeringFrequency(bqOptions) != null) {
LOG.warn("Storage API triggering frequency" + error);
}
if (getStorageApiNumStreams(bqOptions) != 0) {
LOG.warn("Setting the number of Storage API streams" + error);
}
}
if (method == Method.STORAGE_API_AT_LEAST_ONCE && getStorageApiNumStreams(bqOptions) != 0) {
LOG.warn(
"Setting a number of Storage API streams is only supported when using STORAGE_WRITE_API");
}
if (method != Method.STORAGE_WRITE_API && method != Method.STORAGE_API_AT_LEAST_ONCE) {
checkArgument(
!getAutoSchemaUpdate(),
"withAutoSchemaUpdate only supported when using STORAGE_WRITE_API or STORAGE_API_AT_LEAST_ONCE.");
} else if (getWriteDisposition() == WriteDisposition.WRITE_TRUNCATE) {
LOG.warn("The Storage API sink does not support the WRITE_TRUNCATE write disposition.");
}
if (getRowMutationInformationFn() != null) {
checkArgument(getMethod() == Method.STORAGE_API_AT_LEAST_ONCE);
checkArgument(
getCreateDisposition() == CreateDisposition.CREATE_NEVER || getPrimaryKey() != null,
"If specifying CREATE_IF_NEEDED along with row updates, a primary key needs to be specified");
}
if (getPrimaryKey() != null) {
checkArgument(
getMethod() != Method.FILE_LOADS, "Primary key not supported when using FILE_LOADS");
}
if (getAutoSchemaUpdate()) {
checkArgument(
getIgnoreUnknownValues(),
"Auto schema update currently only supported when ignoreUnknownValues also set.");
checkArgument(
!getUseBeamSchema(), "Auto schema update not supported when using Beam schemas.");
}
if (getJsonTimePartitioning() != null) {
checkArgument(
getDynamicDestinations() == null,
"The supplied DynamicDestinations object can directly set TimePartitioning."
+ " There is no need to call BigQueryIO.Write.withTimePartitioning.");
checkArgument(
getTableFunction() == null,
"The supplied getTableFunction object can directly set TimePartitioning."
+ " There is no need to call BigQueryIO.Write.withTimePartitioning.");
}
DynamicDestinations<T, ?> dynamicDestinations = getDynamicDestinations();
if (dynamicDestinations == null) {
if (getJsonTableRef() != null) {
dynamicDestinations =
DynamicDestinationsHelpers.ConstantTableDestinations.fromJsonTableRef(
getJsonTableRef(), getTableDescription(), getClustering() != null);
} else if (getTableFunction() != null) {
dynamicDestinations =
new TableFunctionDestinations<>(getTableFunction(), getClustering() != null);
}
if (getJsonSchema() != null) {
dynamicDestinations =
new ConstantSchemaDestinations<>(
(DynamicDestinations<T, TableDestination>) dynamicDestinations, getJsonSchema());
} else if (getSchemaFromView() != null) {
dynamicDestinations =
new SchemaFromViewDestinations<>(
(DynamicDestinations<T, TableDestination>) dynamicDestinations,
getSchemaFromView());
}
if (getJsonTimePartitioning() != null) {
dynamicDestinations =
new ConstantTimePartitioningDestinations<>(
(DynamicDestinations<T, TableDestination>) dynamicDestinations,
getJsonTimePartitioning(),
StaticValueProvider.of(BigQueryHelpers.toJsonString(getClustering())));
}
if (getPrimaryKey() != null) {
dynamicDestinations =
new DynamicDestinationsHelpers.ConstantTableConstraintsDestinations<>(
(DynamicDestinations<T, TableDestination>) dynamicDestinations,
new TableConstraints()
.setPrimaryKey(
new TableConstraints.PrimaryKey().setColumns(getPrimaryKey())));
}
}
return expandTyped(input, dynamicDestinations);
}
|
} else if (getWriteDisposition() == WriteDisposition.WRITE_TRUNCATE) {
|
public WriteResult expand(PCollection<T> input) {
checkArgument(
getTableFunction() != null
|| getJsonTableRef() != null
|| getDynamicDestinations() != null,
"must set the table reference of a BigQueryIO.Write transform");
List<?> allToArgs =
Lists.newArrayList(getJsonTableRef(), getTableFunction(), getDynamicDestinations());
checkArgument(
1
== Iterables.size(
allToArgs.stream()
.filter(Predicates.notNull()::apply)
.collect(Collectors.toList())),
"Exactly one of jsonTableRef, tableFunction, or dynamicDestinations must be set");
List<?> allSchemaArgs =
Lists.newArrayList(getJsonSchema(), getSchemaFromView(), getDynamicDestinations());
checkArgument(
2
> Iterables.size(
allSchemaArgs.stream()
.filter(Predicates.notNull()::apply)
.collect(Collectors.toList())),
"No more than one of jsonSchema, schemaFromView, or dynamicDestinations may be set");
BigQueryOptions bqOptions = input.getPipeline().getOptions().as(BigQueryOptions.class);
Write.Method method = resolveMethod(input);
if (input.isBounded() == IsBounded.UNBOUNDED) {
if (method == Write.Method.FILE_LOADS || method == Write.Method.STORAGE_WRITE_API) {
Duration triggeringFrequency =
(method == Write.Method.STORAGE_WRITE_API)
? getStorageApiTriggeringFrequency(bqOptions)
: getTriggeringFrequency();
checkArgument(
triggeringFrequency != null,
"When writing an unbounded PCollection via FILE_LOADS or STORAGE_WRITE_API, "
+ "triggering frequency must be specified");
} else {
checkArgument(
getTriggeringFrequency() == null,
"Triggering frequency can be specified only when writing via FILE_LOADS or STORAGE_WRITE_API, but the method was %s.",
method);
}
if (method != Method.FILE_LOADS) {
checkArgument(
getNumFileShards() == 0,
"Number of file shards can be specified only when writing via FILE_LOADS, but the method was %s.",
method);
}
if (method == Method.STORAGE_API_AT_LEAST_ONCE
&& getStorageApiTriggeringFrequency(bqOptions) != null) {
LOG.warn(
"Storage API triggering frequency option will be ignored is it can only be specified only "
+ "when writing via STORAGE_WRITE_API, but the method was {}.",
method);
}
if (getAutoSharding()) {
if (method == Method.STORAGE_WRITE_API && getStorageApiNumStreams(bqOptions) > 0) {
LOG.warn(
"Both numStorageWriteApiStreams and auto-sharding options are set. Will default to auto-sharding."
+ " To set a fixed number of streams, do not enable auto-sharding.");
} else if (method == Method.FILE_LOADS && getNumFileShards() > 0) {
LOG.warn(
"Both numFileShards and auto-sharding options are set. Will default to auto-sharding."
+ " To set a fixed number of file shards, do not enable auto-sharding.");
} else if (method == Method.STORAGE_API_AT_LEAST_ONCE) {
LOG.warn(
"The setting of auto-sharding is ignored. It is only supported when writing an"
+ " unbounded PCollection via FILE_LOADS, STREAMING_INSERTS or"
+ " STORAGE_WRITE_API, but the method was {}.",
method);
}
}
} else {
String error =
String.format(
" is only applicable to an unbounded PCollection, but the input PCollection is %s.",
input.isBounded());
checkArgument(getTriggeringFrequency() == null, "Triggering frequency" + error);
checkArgument(!getAutoSharding(), "Auto-sharding" + error);
checkArgument(getNumFileShards() == 0, "Number of file shards" + error);
if (getStorageApiTriggeringFrequency(bqOptions) != null) {
LOG.warn("Setting a triggering frequency" + error);
}
if (getStorageApiNumStreams(bqOptions) != 0) {
LOG.warn("Setting the number of Storage API streams" + error);
}
}
if (method == Method.STORAGE_API_AT_LEAST_ONCE && getStorageApiNumStreams(bqOptions) != 0) {
LOG.warn(
"Setting a number of Storage API streams is only supported when using STORAGE_WRITE_API");
}
if (method != Method.STORAGE_WRITE_API && method != Method.STORAGE_API_AT_LEAST_ONCE) {
checkArgument(
!getAutoSchemaUpdate(),
"withAutoSchemaUpdate only supported when using STORAGE_WRITE_API or STORAGE_API_AT_LEAST_ONCE.");
} else if (getWriteDisposition() == WriteDisposition.WRITE_TRUNCATE) {
LOG.error("The Storage API sink does not support the WRITE_TRUNCATE write disposition.");
}
if (getRowMutationInformationFn() != null) {
checkArgument(getMethod() == Method.STORAGE_API_AT_LEAST_ONCE);
checkArgument(
getCreateDisposition() == CreateDisposition.CREATE_NEVER || getPrimaryKey() != null,
"If specifying CREATE_IF_NEEDED along with row updates, a primary key needs to be specified");
}
if (getPrimaryKey() != null) {
checkArgument(
getMethod() != Method.FILE_LOADS, "Primary key not supported when using FILE_LOADS");
}
if (getAutoSchemaUpdate()) {
checkArgument(
getIgnoreUnknownValues(),
"Auto schema update currently only supported when ignoreUnknownValues also set.");
checkArgument(
!getUseBeamSchema(), "Auto schema update not supported when using Beam schemas.");
}
if (getJsonTimePartitioning() != null) {
checkArgument(
getDynamicDestinations() == null,
"The supplied DynamicDestinations object can directly set TimePartitioning."
+ " There is no need to call BigQueryIO.Write.withTimePartitioning.");
checkArgument(
getTableFunction() == null,
"The supplied getTableFunction object can directly set TimePartitioning."
+ " There is no need to call BigQueryIO.Write.withTimePartitioning.");
}
DynamicDestinations<T, ?> dynamicDestinations = getDynamicDestinations();
if (dynamicDestinations == null) {
if (getJsonTableRef() != null) {
dynamicDestinations =
DynamicDestinationsHelpers.ConstantTableDestinations.fromJsonTableRef(
getJsonTableRef(), getTableDescription(), getClustering() != null);
} else if (getTableFunction() != null) {
dynamicDestinations =
new TableFunctionDestinations<>(getTableFunction(), getClustering() != null);
}
if (getJsonSchema() != null) {
dynamicDestinations =
new ConstantSchemaDestinations<>(
(DynamicDestinations<T, TableDestination>) dynamicDestinations, getJsonSchema());
} else if (getSchemaFromView() != null) {
dynamicDestinations =
new SchemaFromViewDestinations<>(
(DynamicDestinations<T, TableDestination>) dynamicDestinations,
getSchemaFromView());
}
if (getJsonTimePartitioning() != null) {
dynamicDestinations =
new ConstantTimePartitioningDestinations<>(
(DynamicDestinations<T, TableDestination>) dynamicDestinations,
getJsonTimePartitioning(),
StaticValueProvider.of(BigQueryHelpers.toJsonString(getClustering())));
}
if (getPrimaryKey() != null) {
dynamicDestinations =
new DynamicDestinationsHelpers.ConstantTableConstraintsDestinations<>(
(DynamicDestinations<T, TableDestination>) dynamicDestinations,
new TableConstraints()
.setPrimaryKey(
new TableConstraints.PrimaryKey().setColumns(getPrimaryKey())));
}
}
return expandTyped(input, dynamicDestinations);
}
|
class Builder<T> {
abstract Builder<T> setJsonTableRef(ValueProvider<String> jsonTableRef);
abstract Builder<T> setTableFunction(
SerializableFunction<ValueInSingleWindow<T>, TableDestination> tableFunction);
abstract Builder<T> setFormatFunction(SerializableFunction<T, TableRow> formatFunction);
abstract Builder<T> setFormatRecordOnFailureFunction(
SerializableFunction<T, TableRow> formatFunction);
abstract Builder<T> setAvroRowWriterFactory(
RowWriterFactory.AvroRowWriterFactory<T, ?, ?> avroRowWriterFactory);
abstract Builder<T> setAvroSchemaFactory(
SerializableFunction<@Nullable TableSchema, org.apache.avro.Schema> avroSchemaFactory);
abstract Builder<T> setUseAvroLogicalTypes(boolean useAvroLogicalTypes);
abstract Builder<T> setDynamicDestinations(DynamicDestinations<T, ?> dynamicDestinations);
abstract Builder<T> setSchemaFromView(PCollectionView<Map<String, String>> view);
abstract Builder<T> setJsonSchema(ValueProvider<String> jsonSchema);
abstract Builder<T> setJsonTimePartitioning(ValueProvider<String> jsonTimePartitioning);
abstract Builder<T> setClustering(Clustering clustering);
abstract Builder<T> setCreateDisposition(CreateDisposition createDisposition);
abstract Builder<T> setWriteDisposition(WriteDisposition writeDisposition);
abstract Builder<T> setSchemaUpdateOptions(Set<SchemaUpdateOption> schemaUpdateOptions);
abstract Builder<T> setTableDescription(String tableDescription);
abstract Builder<T> setValidate(boolean validate);
abstract Builder<T> setBigQueryServices(BigQueryServices bigQueryServices);
abstract Builder<T> setMaxFilesPerBundle(Integer maxFilesPerBundle);
abstract Builder<T> setMaxFileSize(Long maxFileSize);
abstract Builder<T> setNumFileShards(int numFileShards);
abstract Builder<T> setNumStorageWriteApiStreams(int numStorageApiStreams);
abstract Builder<T> setPropagateSuccessfulStorageApiWrites(
boolean propagateSuccessfulStorageApiWrites);
abstract Builder<T> setMaxFilesPerPartition(int maxFilesPerPartition);
abstract Builder<T> setMaxBytesPerPartition(long maxBytesPerPartition);
abstract Builder<T> setTriggeringFrequency(Duration triggeringFrequency);
abstract Builder<T> setMethod(Write.Method method);
abstract Builder<T> setLoadJobProjectId(ValueProvider<String> loadJobProjectId);
abstract Builder<T> setFailedInsertRetryPolicy(InsertRetryPolicy retryPolicy);
abstract Builder<T> setCustomGcsTempLocation(ValueProvider<String> customGcsTempLocation);
abstract Builder<T> setExtendedErrorInfo(boolean extendedErrorInfo);
abstract Builder<T> setSkipInvalidRows(Boolean skipInvalidRows);
abstract Builder<T> setIgnoreUnknownValues(Boolean ignoreUnknownValues);
abstract Builder<T> setIgnoreInsertIds(Boolean ignoreInsertIds);
abstract Builder<T> setKmsKey(@Nullable String kmsKey);
abstract Builder<T> setPrimaryKey(@Nullable List<String> primaryKey);
abstract Builder<T> setOptimizeWrites(Boolean optimizeWrites);
abstract Builder<T> setUseBeamSchema(Boolean useBeamSchema);
abstract Builder<T> setAutoSharding(Boolean autoSharding);
abstract Builder<T> setMaxRetryJobs(int maxRetryJobs);
abstract Builder<T> setPropagateSuccessful(Boolean propagateSuccessful);
abstract Builder<T> setAutoSchemaUpdate(Boolean autoSchemaUpdate);
abstract Builder<T> setWriteProtosClass(@Nullable Class<T> clazz);
abstract Builder<T> setDirectWriteProtos(Boolean direct);
abstract Builder<T> setDeterministicRecordIdFn(
SerializableFunction<T, String> toUniqueIdFunction);
abstract Builder<T> setWriteTempDataset(String writeTempDataset);
abstract Builder<T> setRowMutationInformationFn(
SerializableFunction<T, RowMutationInformation> rowMutationFn);
abstract Write<T> build();
}
|
class Builder<T> {
abstract Builder<T> setJsonTableRef(ValueProvider<String> jsonTableRef);
abstract Builder<T> setTableFunction(
SerializableFunction<ValueInSingleWindow<T>, TableDestination> tableFunction);
abstract Builder<T> setFormatFunction(SerializableFunction<T, TableRow> formatFunction);
abstract Builder<T> setFormatRecordOnFailureFunction(
SerializableFunction<T, TableRow> formatFunction);
abstract Builder<T> setAvroRowWriterFactory(
RowWriterFactory.AvroRowWriterFactory<T, ?, ?> avroRowWriterFactory);
abstract Builder<T> setAvroSchemaFactory(
SerializableFunction<@Nullable TableSchema, org.apache.avro.Schema> avroSchemaFactory);
abstract Builder<T> setUseAvroLogicalTypes(boolean useAvroLogicalTypes);
abstract Builder<T> setDynamicDestinations(DynamicDestinations<T, ?> dynamicDestinations);
abstract Builder<T> setSchemaFromView(PCollectionView<Map<String, String>> view);
abstract Builder<T> setJsonSchema(ValueProvider<String> jsonSchema);
abstract Builder<T> setJsonTimePartitioning(ValueProvider<String> jsonTimePartitioning);
abstract Builder<T> setClustering(Clustering clustering);
abstract Builder<T> setCreateDisposition(CreateDisposition createDisposition);
abstract Builder<T> setWriteDisposition(WriteDisposition writeDisposition);
abstract Builder<T> setSchemaUpdateOptions(Set<SchemaUpdateOption> schemaUpdateOptions);
abstract Builder<T> setTableDescription(String tableDescription);
abstract Builder<T> setValidate(boolean validate);
abstract Builder<T> setBigQueryServices(BigQueryServices bigQueryServices);
abstract Builder<T> setMaxFilesPerBundle(Integer maxFilesPerBundle);
abstract Builder<T> setMaxFileSize(Long maxFileSize);
abstract Builder<T> setNumFileShards(int numFileShards);
abstract Builder<T> setNumStorageWriteApiStreams(int numStorageApiStreams);
abstract Builder<T> setPropagateSuccessfulStorageApiWrites(
boolean propagateSuccessfulStorageApiWrites);
abstract Builder<T> setMaxFilesPerPartition(int maxFilesPerPartition);
abstract Builder<T> setMaxBytesPerPartition(long maxBytesPerPartition);
abstract Builder<T> setTriggeringFrequency(Duration triggeringFrequency);
abstract Builder<T> setMethod(Write.Method method);
abstract Builder<T> setLoadJobProjectId(ValueProvider<String> loadJobProjectId);
abstract Builder<T> setFailedInsertRetryPolicy(InsertRetryPolicy retryPolicy);
abstract Builder<T> setCustomGcsTempLocation(ValueProvider<String> customGcsTempLocation);
abstract Builder<T> setExtendedErrorInfo(boolean extendedErrorInfo);
abstract Builder<T> setSkipInvalidRows(Boolean skipInvalidRows);
abstract Builder<T> setIgnoreUnknownValues(Boolean ignoreUnknownValues);
abstract Builder<T> setIgnoreInsertIds(Boolean ignoreInsertIds);
abstract Builder<T> setKmsKey(@Nullable String kmsKey);
abstract Builder<T> setPrimaryKey(@Nullable List<String> primaryKey);
abstract Builder<T> setOptimizeWrites(Boolean optimizeWrites);
abstract Builder<T> setUseBeamSchema(Boolean useBeamSchema);
abstract Builder<T> setAutoSharding(Boolean autoSharding);
abstract Builder<T> setMaxRetryJobs(int maxRetryJobs);
abstract Builder<T> setPropagateSuccessful(Boolean propagateSuccessful);
abstract Builder<T> setAutoSchemaUpdate(Boolean autoSchemaUpdate);
abstract Builder<T> setWriteProtosClass(@Nullable Class<T> clazz);
abstract Builder<T> setDirectWriteProtos(Boolean direct);
abstract Builder<T> setDeterministicRecordIdFn(
SerializableFunction<T, String> toUniqueIdFunction);
abstract Builder<T> setWriteTempDataset(String writeTempDataset);
abstract Builder<T> setRowMutationInformationFn(
SerializableFunction<T, RowMutationInformation> rowMutationFn);
abstract Write<T> build();
}
|
Are we going to clean this stuff up?
|
public BType readType() throws IOException {
byte tag = inputStream.readByte();
switch (tag) {
case TypeTags.INT:
return symTable.intType;
case TypeTags.BYTE:
return symTable.byteType;
case TypeTags.FLOAT:
return symTable.floatType;
case TypeTags.DECIMAL:
return symTable.decimalType;
case TypeTags.STRING:
return symTable.stringType;
case TypeTags.BOOLEAN:
return symTable.booleanType;
case TypeTags.JSON:
return symTable.jsonType;
case TypeTags.XML:
return symTable.xmlType;
case TypeTags.TABLE:
BTableType bTableType = new BTableType(TypeTags.TABLE, null, symTable.tableType.tsymbol);
bTableType.constraint = readType();
return bTableType;
case TypeTags.NIL:
return symTable.nilType;
case TypeTags.ANYDATA:
return symTable.anydataType;
case TypeTags.RECORD:
String name = getUTF8CPEntryValue(inputStream);
BRecordTypeSymbol recordSymbol = Symbols.createRecordSymbol(Flags.asMask(EnumSet.of(Flag.PUBLIC)),
names.fromString(name), env.pkgSymbol.pkgID, null, env.pkgSymbol);
recordSymbol.scope = new Scope(recordSymbol);
BRecordType recordType = new BRecordType(recordSymbol);
recordSymbol.type = recordType;
compositeStack.push(recordType);
recordType.sealed = inputStream.readBoolean();
recordType.restFieldType = readType();
int recordFields = inputStream.readInt();
for (int i = 0; i < recordFields; i++) {
String fieldName = getUTF8CPEntryValue(inputStream);
BType fieldType = readType();
BVarSymbol varSymbol = new BVarSymbol(0, names.fromString(fieldName),
recordSymbol.pkgID, fieldType, recordSymbol.scope.owner);
recordSymbol.scope.define(varSymbol.name, varSymbol);
}
getUTF8CPEntryValue(inputStream);
inputStream.readByte();
readType();
Object poppedRecordType = compositeStack.pop();
assert poppedRecordType == recordType;
return recordType;
case TypeTags.TYPEDESC:
return symTable.typeDesc;
case TypeTags.STREAM:
BStreamType bStreamType = new BStreamType(TypeTags.STREAM, null, symTable.streamType.tsymbol);
bStreamType.constraint = readType();
return bStreamType;
case TypeTags.MAP:
BMapType bMapType = new BMapType(TypeTags.MAP, null, symTable.mapType.tsymbol);
bMapType.constraint = readType();
return bMapType;
case TypeTags.INVOKABLE:
BTypeSymbol tsymbol = Symbols.createTypeSymbol(SymTag.FUNCTION_TYPE,
Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null,
env.pkgSymbol.owner);
BInvokableType bInvokableType = new BInvokableType(null, null, tsymbol);
int paramCount = inputStream.readInt();
List<BType> paramTypes = new ArrayList<>();
for (int i = 0; i < paramCount; i++) {
paramTypes.add(readType());
}
BType retType = readType();
bInvokableType.paramTypes = paramTypes;
bInvokableType.retType = retType;
return bInvokableType;
case TypeTags.ANY:
return symTable.anyType;
case TypeTags.ENDPOINT:
break;
case TypeTags.ARRAY:
byte state = inputStream.readByte();
int size = inputStream.readInt();
BTypeSymbol arrayTypeSymbol = Symbols.createTypeSymbol(SymTag.ARRAY_TYPE, Flags.asMask(EnumSet
.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner);
BArrayType bArrayType = new BArrayType(null, arrayTypeSymbol, size, BArrayState.valueOf(state));
bArrayType.eType = readType();
return bArrayType;
case TypeTags.UNION:
BTypeSymbol unionTypeSymbol = Symbols.createTypeSymbol(SymTag.UNION_TYPE, Flags.asMask(EnumSet
.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner);
BUnionType unionType = BUnionType.create(unionTypeSymbol,
new LinkedHashSet<>());
int unionMemberCount = inputStream.readInt();
for (int i = 0; i < unionMemberCount; i++) {
unionType.add(readType());
}
return unionType;
case TypeTags.PACKAGE:
break;
case TypeTags.NONE:
return symTable.noType;
case TypeTags.VOID:
break;
case TypeTags.XMLNS:
break;
case TypeTags.ANNOTATION:
break;
case TypeTags.SEMANTIC_ERROR:
break;
case TypeTags.ERROR:
BTypeSymbol errorSymbol = new BErrorTypeSymbol(SymTag.ERROR, Flags.PUBLIC, Names.EMPTY,
env.pkgSymbol.pkgID, null, env.pkgSymbol.owner);
BErrorType errorType = new BErrorType(errorSymbol);
compositeStack.push(errorType);
BType reasonType = readType();
BType detailsType = readType();
errorType.reasonType = reasonType;
errorType.detailType = detailsType;
errorSymbol.type = errorType;
Object poppedErrorType = compositeStack.pop();
assert poppedErrorType == errorType;
return errorType;
case TypeTags.ITERATOR:
break;
case TypeTags.TUPLE:
BTypeSymbol tupleTypeSymbol = Symbols.createTypeSymbol(SymTag.TUPLE_TYPE, Flags.asMask(EnumSet
.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner);
BTupleType bTupleType = new BTupleType(tupleTypeSymbol, null);
int tupleMemberCount = inputStream.readInt();
List<BType> tupleMemberTypes = new ArrayList<>();
for (int i = 0; i < tupleMemberCount; i++) {
tupleMemberTypes.add(readType());
}
bTupleType.tupleTypes = tupleMemberTypes;
return bTupleType;
case TypeTags.FUTURE:
BFutureType bFutureType = new BFutureType(TypeTags.FUTURE, null, symTable.futureType.tsymbol);
bFutureType.constraint = readType();
return bFutureType;
case TypeTags.INTERMEDIATE_COLLECTION:
break;
case TypeTags.FINITE:
BTypeSymbol symbol = Symbols.createTypeSymbol(SymTag.FINITE_TYPE, 0, Names.EMPTY,
env.pkgSymbol.pkgID, null, env.pkgSymbol);
symbol.scope = new Scope(symbol);
BFiniteType finiteType = new BFiniteType(symbol);
symbol.type = finiteType;
int valueSpaceSize = inputStream.readInt();
for (int i = 0; i < valueSpaceSize; i++) {
defineValueSpace(inputStream, finiteType);
}
return finiteType;
case TypeTags.OBJECT:
boolean service = inputStream.readByte() == 1;
String objName = getUTF8CPEntryValue(inputStream);
int objFlags = (inputStream.readBoolean() ? Flags.ABSTRACT : 0) | Flags.PUBLIC;
BObjectTypeSymbol objectSymbol = (BObjectTypeSymbol) Symbols.createObjectSymbol(objFlags,
names.fromString(objName), env.pkgSymbol.pkgID, null, env.pkgSymbol);
objectSymbol.scope = new Scope(objectSymbol);
objectSymbol.methodScope = new Scope(objectSymbol);
BObjectType objectType;
if (service) {
objectType = new BServiceType(objectSymbol);
} else {
objectType = new BObjectType(objectSymbol);
}
objectSymbol.type = objectType;
compositeStack.push(objectType);
int fieldCount = inputStream.readInt();
for (int i = 0; i < fieldCount; i++) {
String fieldName = getUTF8CPEntryValue(inputStream);
int fieldFlags = 0;
fieldFlags = visibilityAsMask(fieldFlags, inputStream.readByte());
BType fieldType = readType();
BVarSymbol objectVarSymbol = new BVarSymbol(fieldFlags, names.fromString(fieldName),
objectSymbol.pkgID, fieldType, objectSymbol.scope.owner);
objectSymbol.scope.define(objectVarSymbol.name, objectVarSymbol);
}
int funcCount = inputStream.readInt();
for (int i = 0; i < funcCount; i++) {
String funcName = getUTF8CPEntryValue(inputStream);
int funcFlags = 0;
funcFlags = visibilityAsMask(funcFlags, inputStream.readByte());
BInvokableType funcType = (BInvokableType) readType();
BInvokableSymbol invokableSymbol = Symbols.createFunctionSymbol(funcFlags,
names.fromString(funcName), env.pkgSymbol.pkgID, funcType,
env.pkgSymbol, Symbols.isFlagOn(objFlags, Flags.NATIVE));
invokableSymbol.retType = funcType.retType;
BAttachedFunction attachedFunc =
new BAttachedFunction(names.fromString(funcName), invokableSymbol, funcType);
objectSymbol.attachedFuncs.add(attachedFunc);
if (Names.OBJECT_INIT_SUFFIX.value.equals(funcName)
|| funcName.equals(Names.INIT_FUNCTION_SUFFIX.value)) {
objectSymbol.initializerFunc = attachedFunc;
}
}
Object poppedObjType = compositeStack.pop();
assert poppedObjType == objectType;
return objectType;
case TypeTags.BYTE_ARRAY:
break;
case TypeTags.FUNCTION_POINTER:
break;
case TYPE_TAG_SELF:
int index = inputStream.readInt();
return (BType) compositeStack.get(index);
case SERVICE_TYPE_TAG:
return symTable.anyServiceType;
}
return null;
}
|
public BType readType() throws IOException {
byte tag = inputStream.readByte();
switch (tag) {
case TypeTags.INT:
return symTable.intType;
case TypeTags.BYTE:
return symTable.byteType;
case TypeTags.FLOAT:
return symTable.floatType;
case TypeTags.DECIMAL:
return symTable.decimalType;
case TypeTags.STRING:
return symTable.stringType;
case TypeTags.BOOLEAN:
return symTable.booleanType;
case TypeTags.JSON:
return symTable.jsonType;
case TypeTags.XML:
return symTable.xmlType;
case TypeTags.TABLE:
BTableType bTableType = new BTableType(TypeTags.TABLE, null, symTable.tableType.tsymbol);
bTableType.constraint = readType();
return bTableType;
case TypeTags.NIL:
return symTable.nilType;
case TypeTags.ANYDATA:
return symTable.anydataType;
case TypeTags.RECORD:
String name = getUTF8CPEntryValue(inputStream);
BRecordTypeSymbol recordSymbol = Symbols.createRecordSymbol(Flags.asMask(EnumSet.of(Flag.PUBLIC)),
names.fromString(name), env.pkgSymbol.pkgID, null, env.pkgSymbol);
recordSymbol.scope = new Scope(recordSymbol);
BRecordType recordType = new BRecordType(recordSymbol);
recordSymbol.type = recordType;
compositeStack.push(recordType);
recordType.sealed = inputStream.readBoolean();
recordType.restFieldType = readType();
int recordFields = inputStream.readInt();
for (int i = 0; i < recordFields; i++) {
String fieldName = getUTF8CPEntryValue(inputStream);
BType fieldType = readType();
BVarSymbol varSymbol = new BVarSymbol(0, names.fromString(fieldName),
recordSymbol.pkgID, fieldType, recordSymbol.scope.owner);
recordSymbol.scope.define(varSymbol.name, varSymbol);
}
getUTF8CPEntryValue(inputStream);
inputStream.readByte();
readType();
Object poppedRecordType = compositeStack.pop();
assert poppedRecordType == recordType;
return recordType;
case TypeTags.TYPEDESC:
return symTable.typeDesc;
case TypeTags.STREAM:
BStreamType bStreamType = new BStreamType(TypeTags.STREAM, null, symTable.streamType.tsymbol);
bStreamType.constraint = readType();
return bStreamType;
case TypeTags.MAP:
BMapType bMapType = new BMapType(TypeTags.MAP, null, symTable.mapType.tsymbol);
bMapType.constraint = readType();
return bMapType;
case TypeTags.INVOKABLE:
BTypeSymbol tsymbol = Symbols.createTypeSymbol(SymTag.FUNCTION_TYPE,
Flags.asMask(EnumSet.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null,
env.pkgSymbol.owner);
BInvokableType bInvokableType = new BInvokableType(null, null, tsymbol);
int paramCount = inputStream.readInt();
List<BType> paramTypes = new ArrayList<>();
for (int i = 0; i < paramCount; i++) {
paramTypes.add(readType());
}
BType retType = readType();
bInvokableType.paramTypes = paramTypes;
bInvokableType.retType = retType;
return bInvokableType;
case TypeTags.ANY:
return symTable.anyType;
case TypeTags.ENDPOINT:
break;
case TypeTags.ARRAY:
byte state = inputStream.readByte();
int size = inputStream.readInt();
BTypeSymbol arrayTypeSymbol = Symbols.createTypeSymbol(SymTag.ARRAY_TYPE, Flags.asMask(EnumSet
.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner);
BArrayType bArrayType = new BArrayType(null, arrayTypeSymbol, size, BArrayState.valueOf(state));
bArrayType.eType = readType();
return bArrayType;
case TypeTags.UNION:
BTypeSymbol unionTypeSymbol = Symbols.createTypeSymbol(SymTag.UNION_TYPE, Flags.asMask(EnumSet
.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner);
BUnionType unionType = BUnionType.create(unionTypeSymbol,
new LinkedHashSet<>());
int unionMemberCount = inputStream.readInt();
for (int i = 0; i < unionMemberCount; i++) {
unionType.add(readType());
}
return unionType;
case TypeTags.PACKAGE:
break;
case TypeTags.NONE:
return symTable.noType;
case TypeTags.VOID:
break;
case TypeTags.XMLNS:
break;
case TypeTags.ANNOTATION:
break;
case TypeTags.SEMANTIC_ERROR:
break;
case TypeTags.ERROR:
BTypeSymbol errorSymbol = new BErrorTypeSymbol(SymTag.ERROR, Flags.PUBLIC, Names.EMPTY,
env.pkgSymbol.pkgID, null, env.pkgSymbol.owner);
BErrorType errorType = new BErrorType(errorSymbol);
compositeStack.push(errorType);
BType reasonType = readType();
BType detailsType = readType();
errorType.reasonType = reasonType;
errorType.detailType = detailsType;
errorSymbol.type = errorType;
Object poppedErrorType = compositeStack.pop();
assert poppedErrorType == errorType;
return errorType;
case TypeTags.ITERATOR:
break;
case TypeTags.TUPLE:
BTypeSymbol tupleTypeSymbol = Symbols.createTypeSymbol(SymTag.TUPLE_TYPE, Flags.asMask(EnumSet
.of(Flag.PUBLIC)), Names.EMPTY, env.pkgSymbol.pkgID, null, env.pkgSymbol.owner);
BTupleType bTupleType = new BTupleType(tupleTypeSymbol, null);
int tupleMemberCount = inputStream.readInt();
List<BType> tupleMemberTypes = new ArrayList<>();
for (int i = 0; i < tupleMemberCount; i++) {
tupleMemberTypes.add(readType());
}
bTupleType.tupleTypes = tupleMemberTypes;
return bTupleType;
case TypeTags.FUTURE:
BFutureType bFutureType = new BFutureType(TypeTags.FUTURE, null, symTable.futureType.tsymbol);
bFutureType.constraint = readType();
return bFutureType;
case TypeTags.INTERMEDIATE_COLLECTION:
break;
case TypeTags.FINITE:
BTypeSymbol symbol = Symbols.createTypeSymbol(SymTag.FINITE_TYPE, 0, Names.EMPTY,
env.pkgSymbol.pkgID, null, env.pkgSymbol);
symbol.scope = new Scope(symbol);
BFiniteType finiteType = new BFiniteType(symbol);
symbol.type = finiteType;
int valueSpaceSize = inputStream.readInt();
for (int i = 0; i < valueSpaceSize; i++) {
defineValueSpace(inputStream, finiteType);
}
return finiteType;
case TypeTags.OBJECT:
boolean service = inputStream.readByte() == 1;
String objName = getUTF8CPEntryValue(inputStream);
int objFlags = (inputStream.readBoolean() ? Flags.ABSTRACT : 0) | Flags.PUBLIC;
BObjectTypeSymbol objectSymbol = (BObjectTypeSymbol) Symbols.createObjectSymbol(objFlags,
names.fromString(objName), env.pkgSymbol.pkgID, null, env.pkgSymbol);
objectSymbol.scope = new Scope(objectSymbol);
objectSymbol.methodScope = new Scope(objectSymbol);
BObjectType objectType;
if (service) {
objectType = new BServiceType(objectSymbol);
} else {
objectType = new BObjectType(objectSymbol);
}
objectSymbol.type = objectType;
compositeStack.push(objectType);
int fieldCount = inputStream.readInt();
for (int i = 0; i < fieldCount; i++) {
String fieldName = getUTF8CPEntryValue(inputStream);
int fieldFlags = 0;
fieldFlags = visibilityAsMask(fieldFlags, inputStream.readByte());
BType fieldType = readType();
BVarSymbol objectVarSymbol = new BVarSymbol(fieldFlags, names.fromString(fieldName),
objectSymbol.pkgID, fieldType, objectSymbol.scope.owner);
objectSymbol.scope.define(objectVarSymbol.name, objectVarSymbol);
}
int funcCount = inputStream.readInt();
for (int i = 0; i < funcCount; i++) {
String funcName = getUTF8CPEntryValue(inputStream);
int funcFlags = 0;
funcFlags = visibilityAsMask(funcFlags, inputStream.readByte());
BInvokableType funcType = (BInvokableType) readType();
BInvokableSymbol invokableSymbol = Symbols.createFunctionSymbol(funcFlags,
names.fromString(funcName), env.pkgSymbol.pkgID, funcType,
env.pkgSymbol, Symbols.isFlagOn(objFlags, Flags.NATIVE));
invokableSymbol.retType = funcType.retType;
BAttachedFunction attachedFunc =
new BAttachedFunction(names.fromString(funcName), invokableSymbol, funcType);
objectSymbol.attachedFuncs.add(attachedFunc);
if (Names.OBJECT_INIT_SUFFIX.value.equals(funcName)
|| funcName.equals(Names.INIT_FUNCTION_SUFFIX.value)) {
objectSymbol.initializerFunc = attachedFunc;
}
}
Object poppedObjType = compositeStack.pop();
assert poppedObjType == objectType;
return objectType;
case TypeTags.BYTE_ARRAY:
break;
case TypeTags.FUNCTION_POINTER:
break;
case TYPE_TAG_SELF:
int index = inputStream.readInt();
return (BType) compositeStack.get(index);
case SERVICE_TYPE_TAG:
return symTable.anyServiceType;
}
return null;
}
|
class BIRTypeReader {
public static final int TYPE_TAG_SELF = 50;
public static final int SERVICE_TYPE_TAG = 51;
private DataInputStream inputStream;
private LinkedList<Object> compositeStack = new LinkedList<>();
public BIRTypeReader(DataInputStream inputStream) {
this.inputStream = inputStream;
}
}
|
class BIRTypeReader {
public static final int TYPE_TAG_SELF = 50;
public static final int SERVICE_TYPE_TAG = 51;
private DataInputStream inputStream;
private LinkedList<Object> compositeStack = new LinkedList<>();
public BIRTypeReader(DataInputStream inputStream) {
this.inputStream = inputStream;
}
}
|
|
There should also be a check to ensure there is at least one method that starts with "build".
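A sketch of that check: record whether any `build*` method was seen while visiting, then verify it when leaving the class definition (the field and message text here are illustrative and meant to slot into the existing check class):

```java
private boolean hasBuildMethod;

@Override
public void leaveToken(DetailAST token) {
    if (token.getType() == TokenTypes.CLASS_DEF
            && hasServiceClientBuilderAnnotation && !hasBuildMethod) {
        log(token, "Class annotated with @ServiceClientBuilder must declare at least one method starting with ''build''.");
    }
}
```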
|
public void visitToken(DetailAST token) {
switch (token.getType()) {
case TokenTypes.CLASS_DEF:
hasServiceClientBuilderAnnotationStack.push(hasServiceClientBuilderAnnotation);
final DetailAST serviceClientAnnotationBuilderToken = getServiceClientBuilderAnnotation(token);
final String className = token.findFirstToken(TokenTypes.IDENT).getText();
hasServiceClientBuilderAnnotation = serviceClientAnnotationBuilderToken != null;
if (hasServiceClientBuilderAnnotation) {
if (!className.endsWith("ClientBuilder")) {
log(token, String.format("Class annotated with @ServiceClientBuilder ''%s'' should be named <ServiceName>ClientBuilder.", className));
}
} else {
if (className.endsWith("ClientBuilder")) {
log(token, String.format("Class ''%s'' should be annotated with @ServiceClientBuilder.", className));
}
}
break;
case TokenTypes.METHOD_DEF:
if (!hasServiceClientBuilderAnnotation) {
return;
}
final String methodName = token.findFirstToken(TokenTypes.IDENT).getText();
if (methodName.startsWith("build") && !BUILD_ASYNC_CLIENT.equals(methodName) && !BUILD_CLIENT.equals(methodName)) {
log(token, String.format(
"@ServiceClientBuilder class should not have a method name, '''' starting with ''build'' " +
"other than ''buildClient'' or ''buildAsyncClient''." , methodName));
}
break;
default:
break;
}
}
|
if (methodName.startsWith("build") && !BUILD_ASYNC_CLIENT.equals(methodName) && !BUILD_CLIENT.equals(methodName)) {
|
public void visitToken(DetailAST token) {
switch (token.getType()) {
case TokenTypes.CLASS_DEF:
hasServiceClientBuilderAnnotationStack.push(hasServiceClientBuilderAnnotation);
hasBuildMethodStack.push(hasBuildMethod);
final DetailAST serviceClientAnnotationBuilderToken = getServiceClientBuilderAnnotation(token);
final String className = token.findFirstToken(TokenTypes.IDENT).getText();
hasServiceClientBuilderAnnotation = serviceClientAnnotationBuilderToken != null;
if (hasServiceClientBuilderAnnotation) {
if (!className.endsWith("ClientBuilder")) {
log(token, String.format("Class annotated with @ServiceClientBuilder ''%s'' should be named <ServiceName>ClientBuilder.", className));
}
} else {
if (className.endsWith("ClientBuilder")) {
log(token, String.format("Class ''%s'' should be annotated with @ServiceClientBuilder.", className));
}
}
break;
case TokenTypes.METHOD_DEF:
if (!hasServiceClientBuilderAnnotation) {
return;
}
final String methodName = token.findFirstToken(TokenTypes.IDENT).getText();
if (!methodName.startsWith("build")) {
break;
}
hasBuildMethod = true;
if (!methodName.endsWith("Client")) {
log(token, String.format(
"@ServiceClientBuilder class should not have a method name, ''%s'' starting with ''build'' but not ending with ''Client''." , methodName));
}
break;
default:
break;
}
}
|
class ServiceClientBuilderCheck extends AbstractCheck {
private static final String SERVICE_CLIENT_BUILDER = "ServiceClientBuilder";
private static final String BUILD_CLIENT = "buildClient";
private static final String BUILD_ASYNC_CLIENT = "buildAsyncClient";
private Stack<Boolean> hasServiceClientBuilderAnnotationStack = new Stack();
private boolean hasServiceClientBuilderAnnotation;
@Override
public int[] getDefaultTokens() {
return getRequiredTokens();
}
@Override
public int[] getAcceptableTokens() {
return getRequiredTokens();
}
@Override
public int[] getRequiredTokens() {
return new int[] {
TokenTypes.CLASS_DEF,
TokenTypes.METHOD_DEF
};
}
@Override
public void leaveToken(DetailAST token) {
switch (token.getType()) {
case TokenTypes.CLASS_DEF:
hasServiceClientBuilderAnnotation = hasServiceClientBuilderAnnotationStack.pop();
break;
default:
break;
}
}
@Override
/**
* Checks if the class is annotated with @ServiceClientBuilder.
*
* @param classDefToken the CLASS_DEF AST node
* @return the annotation node if the class is annotated with @ServiceClientBuilder, null otherwise.
*/
private DetailAST getServiceClientBuilderAnnotation(DetailAST classDefToken) {
final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS);
if (!modifiersToken.branchContains(TokenTypes.ANNOTATION)) {
return null;
}
DetailAST annotationToken = modifiersToken.findFirstToken(TokenTypes.ANNOTATION);
if (!SERVICE_CLIENT_BUILDER.equals(annotationToken.findFirstToken(TokenTypes.IDENT).getText())) {
return null;
}
return annotationToken;
}
}
|
class ServiceClientBuilderCheck extends AbstractCheck {
private static final String SERVICE_CLIENT_BUILDER = "ServiceClientBuilder";
private Stack<Boolean> hasServiceClientBuilderAnnotationStack = new Stack();
private Stack<Boolean> hasBuildMethodStack = new Stack<>();
private boolean hasServiceClientBuilderAnnotation;
private boolean hasBuildMethod;
@Override
public int[] getDefaultTokens() {
return getRequiredTokens();
}
@Override
public int[] getAcceptableTokens() {
return getRequiredTokens();
}
@Override
public int[] getRequiredTokens() {
return new int[] {
TokenTypes.CLASS_DEF,
TokenTypes.METHOD_DEF
};
}
@Override
public void leaveToken(DetailAST token) {
if (token.getType() == TokenTypes.CLASS_DEF) {
hasServiceClientBuilderAnnotation = hasServiceClientBuilderAnnotationStack.pop();
hasBuildMethod = hasBuildMethodStack.pop();
if (hasServiceClientBuilderAnnotation && !hasBuildMethod) {
log(token, "Class with @ServiceClientBuilder annotation must have a method starting with ''build'' and ending with ''Client''.");
}
}
}
@Override
/**
* Checks if the class is annotated with @ServiceClientBuilder.
*
* @param classDefToken the CLASS_DEF AST node
* @return the annotation node if the class is annotated with @ServiceClientBuilder, null otherwise.
*/
private DetailAST getServiceClientBuilderAnnotation(DetailAST classDefToken) {
final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS);
if (!modifiersToken.branchContains(TokenTypes.ANNOTATION)) {
return null;
}
DetailAST annotationToken = modifiersToken.findFirstToken(TokenTypes.ANNOTATION);
if (!SERVICE_CLIENT_BUILDER.equals(annotationToken.findFirstToken(TokenTypes.IDENT).getText())) {
return null;
}
return annotationToken;
}
}
|
Yes, I think this is the very purpose of `WrappingRuntimeException`:

```
/**
 * A runtime exception that is explicitly used to wrap non-runtime exceptions.
```

Or did I misunderstand something?
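A minimal, self-contained sketch of the wrap-and-unwrap pattern that class comment describes (`Loader` is a hypothetical interface introduced for illustration):

```java
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.flink.util.WrappingRuntimeException;

class WrapSketch {
    interface Loader {
        String load(long id) throws IOException;
    }

    private final Map<Long, String> cache = new ConcurrentHashMap<>();

    String get(long id, Loader loader) throws IOException {
        try {
            return cache.computeIfAbsent(id, key -> {
                try {
                    return loader.load(key);
                } catch (IOException e) {
                    // Checked exceptions cannot escape the lambda, so wrap them.
                    throw new WrappingRuntimeException(e);
                }
            });
        } catch (WrappingRuntimeException e) {
            // Unwrap and rethrow the original checked exception.
            throw (IOException) e.getCause();
        }
    }
}
```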
|
public CheckpointStreamFactory resolveCheckpointStorageLocation(long checkpointId, CheckpointStorageLocationReference reference) {
return cache.computeIfAbsent(checkpointId, id -> {
try {
return delegate.resolveCheckpointStorageLocation(checkpointId, reference);
} catch (IOException e) {
throw new WrappingRuntimeException(e);
}
});
}
|
throw new WrappingRuntimeException(e);
|
public CheckpointStreamFactory resolveCheckpointStorageLocation(long checkpointId, CheckpointStorageLocationReference reference) {
return cache.computeIfAbsent(checkpointId, id -> {
try {
return delegate.resolveCheckpointStorageLocation(checkpointId, reference);
} catch (IOException e) {
throw new FlinkRuntimeException(e);
}
});
}
|
class CachingCheckpointStorageWorkerView implements CheckpointStorageWorkerView {
private final Map<Long, CheckpointStreamFactory> cache = new ConcurrentHashMap<>();
private final CheckpointStorageWorkerView delegate;
private CachingCheckpointStorageWorkerView(CheckpointStorageWorkerView delegate) {
this.delegate = delegate;
}
void clearCacheFor(long checkpointId) {
cache.remove(checkpointId);
}
@Override
@Override
public CheckpointStreamFactory.CheckpointStateOutputStream createTaskOwnedStateStream() throws IOException {
return delegate.createTaskOwnedStateStream();
}
}
|
class CachingCheckpointStorageWorkerView implements CheckpointStorageWorkerView {
private final Map<Long, CheckpointStreamFactory> cache = new ConcurrentHashMap<>();
private final CheckpointStorageWorkerView delegate;
private CachingCheckpointStorageWorkerView(CheckpointStorageWorkerView delegate) {
this.delegate = delegate;
}
void clearCacheFor(long checkpointId) {
cache.remove(checkpointId);
}
@Override
@Override
public CheckpointStreamFactory.CheckpointStateOutputStream createTaskOwnedStateStream() throws IOException {
return delegate.createTaskOwnedStateStream();
}
}
|
What about other ref types?
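For illustration, one way nested sections could be covered instead of being silently dropped (a sketch, not the project's actual handling):

```java
@SuppressWarnings("unchecked")
private static MapValue buildMapValue(Map<String, Object> section) {
    MapValue map = new MapValueImpl<String, Object>();
    section.forEach((key, val) -> {
        if (val instanceof String || val instanceof Long
                || val instanceof Double || val instanceof Boolean) {
            map.put(key, val);
        } else if (val instanceof Map) {
            // Recurse into nested sections rather than ignoring them.
            map.put(key, buildMapValue((Map<String, Object>) val));
        }
    });
    return map;
}
```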
|
private static MapValue buildMapValue(Map<String, Object> section) {
MapValue map = new MapValueImpl<String, Object>();
section.forEach((key, val) -> {
if (val instanceof String || val instanceof Long || val instanceof Double || val instanceof Boolean) {
map.put(key, val);
}
});
return map;
}
|
map.put(key, val);
|
private static MapValue buildMapValue(Map<String, Object> section) {
MapValue map = new MapValueImpl<String, Object>();
section.forEach((key, val) -> {
if (val instanceof String || val instanceof Long || val instanceof Double || val instanceof Boolean) {
map.put(key, val);
}
});
return map;
}
|
class GetConfig extends BlockingNativeCallableUnit {
private static final ConfigRegistry configRegistry = ConfigRegistry.getInstance();
@Override
public void execute(Context context) {
String configKey = context.getStringArgument(0);
BString type = (BString) context.getNullableRefArgument(0);
switch (type.stringValue()) {
case "STRING":
String val = configRegistry.getAsString(configKey);
context.setReturnValues(val != null ? new BString(val) : null);
break;
case "INT":
context.setReturnValues(new BInteger(configRegistry.getAsInt(configKey)));
break;
case "FLOAT":
context.setReturnValues(new BFloat(configRegistry.getAsFloat(configKey)));
break;
case "BOOLEAN":
context.setReturnValues(new BBoolean(configRegistry.getAsBoolean(configKey)));
break;
case "MAP":
context.setReturnValues(buildBMap(configRegistry.getAsMap(configKey)));
break;
default:
throw new IllegalStateException("invalid value type: " + type.stringValue());
}
}
private BMap buildBMap(Map<String, Object> section) {
BMap map = new BMap();
section.entrySet().forEach(entry -> {
Object val = entry.getValue();
if (val instanceof String) {
map.put(entry.getKey(), new BString((String) val));
} else if (val instanceof Long) {
map.put(entry.getKey(), new BInteger((Long) val));
} else if (val instanceof Double) {
map.put(entry.getKey(), new BFloat((Double) val));
} else if (val instanceof Boolean) {
map.put(entry.getKey(), new BBoolean((Boolean) val));
}
});
return map;
}
public static Object get(Strand strand, String configKey, Object type) {
switch (type.toString()) {
case "STRING":
return configRegistry.getAsString(configKey);
case "INT":
return configRegistry.getAsInt(configKey);
case "FLOAT":
return configRegistry.getAsFloat(configKey);
case "BOOLEAN":
return configRegistry.getAsBoolean(configKey);
case "MAP":
return buildMapValue(configRegistry.getAsMap(configKey));
default:
throw new IllegalStateException("invalid value type: " + type.toString());
}
}
@SuppressWarnings("unchecked")
}
|
class GetConfig extends BlockingNativeCallableUnit {
private static final ConfigRegistry configRegistry = ConfigRegistry.getInstance();
@Override
public void execute(Context context) {
String configKey = context.getStringArgument(0);
BString type = (BString) context.getNullableRefArgument(0);
switch (type.stringValue()) {
case "STRING":
String val = configRegistry.getAsString(configKey);
context.setReturnValues(val != null ? new BString(val) : null);
break;
case "INT":
context.setReturnValues(new BInteger(configRegistry.getAsInt(configKey)));
break;
case "FLOAT":
context.setReturnValues(new BFloat(configRegistry.getAsFloat(configKey)));
break;
case "BOOLEAN":
context.setReturnValues(new BBoolean(configRegistry.getAsBoolean(configKey)));
break;
case "MAP":
context.setReturnValues(buildBMap(configRegistry.getAsMap(configKey)));
break;
default:
throw new IllegalStateException("invalid value type: " + type.stringValue());
}
}
private BMap buildBMap(Map<String, Object> section) {
BMap map = new BMap();
section.entrySet().forEach(entry -> {
Object val = entry.getValue();
if (val instanceof String) {
map.put(entry.getKey(), new BString((String) val));
} else if (val instanceof Long) {
map.put(entry.getKey(), new BInteger((Long) val));
} else if (val instanceof Double) {
map.put(entry.getKey(), new BFloat((Double) val));
} else if (val instanceof Boolean) {
map.put(entry.getKey(), new BBoolean((Boolean) val));
}
});
return map;
}
public static Object get(Strand strand, String configKey, Object type) {
switch (type.toString()) {
case "STRING":
return configRegistry.getAsString(configKey);
case "INT":
return configRegistry.getAsInt(configKey);
case "FLOAT":
return configRegistry.getAsFloat(configKey);
case "BOOLEAN":
return configRegistry.getAsBoolean(configKey);
case "MAP":
return buildMapValue(configRegistry.getAsMap(configKey));
default:
throw new IllegalStateException("invalid value type: " + type.toString());
}
}
}
|
I think we should disable this behavior. If a user has a sink after `SqlTransform` they would probably expect the fields in the sink to match the fields in the query.
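A minimal sketch of that surprise, assuming Beam's SqlTransform and Create.withRowSchema APIs (the pipeline scaffolding here is illustrative, not taken from this test): Calcite names unaliased projections EXPR$0, EXPR$1, ..., so a sink expecting named fields only lines up when the query aliases its columns.
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.extensions.sql.SqlTransform;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.schemas.Schema;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.Row;
public class ExprFieldNames {
    public static void main(String[] args) {
        Pipeline p = Pipeline.create(PipelineOptionsFactory.create());
        Schema schema = Schema.builder().addStringField("name").build();
        PCollection<Row> input =
                p.apply(Create.of(Row.withSchema(schema).addValue("x").build())
                              .withRowSchema(schema));
        // Unaliased projections surface as EXPR$0 (INT32) and EXPR$1 (STRING).
        PCollection<Row> unnamed =
                input.apply("unnamed", SqlTransform.query("SELECT 1, '1' FROM PCOLLECTION"));
        // Aliasing keeps the output schema aligned with what a downstream sink expects.
        PCollection<Row> named =
                input.apply("named", SqlTransform.query("SELECT 1 AS id, '1' AS label FROM PCOLLECTION"));
        p.run().waitUntilFinish();
    }
}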
|
public void testValues_selectEmpty() throws Exception {
String sql = "select 1, '1' FROM string_table WHERE false";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "EXPR$0",
Schema.FieldType.STRING, "EXPR$1")
.getRows());
pipeline.run();
}
|
String sql = "select 1, '1' FROM string_table WHERE false";
|
public void testValues_selectEmpty() throws Exception {
String sql = "select 1, '1' FROM string_table WHERE false";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "EXPR$0",
Schema.FieldType.STRING, "EXPR$1")
.getRows());
pipeline.run();
}
|
class BeamValuesRelTest extends BaseRelTest {
@Rule public final TestPipeline pipeline = TestPipeline.create();
@BeforeClass
public static void prepare() {
registerTable(
"string_table",
TestBoundedTable.of(
Schema.FieldType.STRING, "name",
Schema.FieldType.STRING, "description"));
registerTable(
"int_table",
TestBoundedTable.of(
Schema.FieldType.INT32, "c0",
Schema.FieldType.INT32, "c1"));
}
@Test
public void testValues() throws Exception {
String sql =
"insert into string_table(name, description) values "
+ "('hello', 'world'), ('james', 'bond')";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.STRING, "name",
Schema.FieldType.STRING, "description")
.addRows(
"hello", "world",
"james", "bond")
.getRows());
pipeline.run();
}
@Test
public void testValues_castInt() throws Exception {
String sql = "insert into int_table (c0, c1) values(cast(1 as int), cast(2 as int))";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "c0",
Schema.FieldType.INT32, "c1")
.addRows(1, 2)
.getRows());
pipeline.run();
}
@Test
public void testValues_onlySelect() throws Exception {
String sql = "select 1, '1'";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "EXPR$0",
Schema.FieldType.STRING, "EXPR$1")
.addRows(1, "1")
.getRows());
pipeline.run();
}
}
|
class BeamValuesRelTest extends BaseRelTest {
@Rule public final TestPipeline pipeline = TestPipeline.create();
@BeforeClass
public static void prepare() {
registerTable(
"string_table",
TestBoundedTable.of(
Schema.FieldType.STRING, "name",
Schema.FieldType.STRING, "description"));
registerTable(
"int_table",
TestBoundedTable.of(
Schema.FieldType.INT32, "c0",
Schema.FieldType.INT32, "c1"));
}
@Test
public void testValues() throws Exception {
String sql =
"insert into string_table(name, description) values "
+ "('hello', 'world'), ('james', 'bond')";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.STRING, "name",
Schema.FieldType.STRING, "description")
.addRows(
"hello", "world",
"james", "bond")
.getRows());
pipeline.run();
}
@Test
public void testValues_castInt() throws Exception {
String sql = "insert into int_table (c0, c1) values(cast(1 as int), cast(2 as int))";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "c0",
Schema.FieldType.INT32, "c1")
.addRows(1, 2)
.getRows());
pipeline.run();
}
@Test
public void testValues_onlySelect() throws Exception {
String sql = "select 1, '1'";
PCollection<Row> rows = compilePipeline(sql, pipeline);
PAssert.that(rows)
.containsInAnyOrder(
TestUtils.RowsBuilder.of(
Schema.FieldType.INT32, "EXPR$0",
Schema.FieldType.STRING, "EXPR$1")
.addRows(1, "1")
.getRows());
pipeline.run();
}
}
|
we should call httpResponse.close() here.
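A minimal sketch of the close-before-retry pattern being asked for, with hypothetical stand-ins (Response, sendOnce) rather than the real azure-core types: the challenged response has to be closed before the request is replayed, otherwise its connection stays checked out while the retry opens a second one.
import java.util.function.Supplier;
interface Response extends AutoCloseable {
    int statusCode();
    @Override
    void close(); // narrowed from AutoCloseable: no checked exception
}
final class RetryOnce {
    // Sends once; on a 401 challenge, releases the first response before replaying.
    static Response send(Supplier<Response> sendOnce) {
        Response first = sendOnce.get();
        if (first.statusCode() == 401) {
            first.close(); // close the challenged response before retrying
            return sendOnce.get();
        }
        return first;
    }
}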
|
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
throw LOGGER.logExceptionAsError(
new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
HttpPipelineNextSyncPolicy nextPolicy = next.clone();
authorizeRequestSync(context);
HttpResponse httpResponse = next.processSync();
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
if (authorizeRequestOnChallengeSync(context, httpResponse)) {
return nextPolicy.processSync();
} else {
return httpResponse;
}
}
return httpResponse;
}
|
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
throw LOGGER.logExceptionAsError(
new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
HttpPipelineNextSyncPolicy nextPolicy = next.clone();
authorizeRequestSync(context);
HttpResponse httpResponse = next.processSync();
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
if (authorizeRequestOnChallengeSync(context, httpResponse)) {
httpResponse.close();
return nextPolicy.processSync();
} else {
return httpResponse;
}
}
return httpResponse;
}
|
class ContainerRegistryCredentialsPolicy extends BearerTokenAuthenticationPolicy {
private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
private static final String BEARER = "Bearer";
public static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String SCOPES_PARAMETER = "scope";
public static final String SERVICE_PARAMETER = "service";
public static final String AUTHORIZATION = "Authorization";
private final ContainerRegistryTokenService tokenService;
private final ClientLogger logger = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
/**
* Creates an instance of ContainerRegistryCredentialsPolicy.
*
* @param tokenService the token generation service.
*/
public ContainerRegistryCredentialsPolicy(ContainerRegistryTokenService tokenService) {
super(tokenService);
this.tokenService = tokenService;
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
return Mono.empty();
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
     * @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return tokenService.getToken(tokenRequestContext)
.flatMap((token) -> {
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
return Mono.empty();
});
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBody().map(buffer -> buffer.duplicate()));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context)
.then(Mono.defer(() -> next.process()))
.flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(retry -> {
if (retry) {
return nextPolicy.process()
.doFinally(ignored -> {
httpResponse.close();
});
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.defer(() -> {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return Mono.just(false);
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
return setAuthorizationHeader(context, new ContainerRegistryTokenRequestContext(serviceName, scope))
.then(Mono.defer(() -> Mono.just(true)));
}
return Mono.just(false);
}
});
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
*/
@Override
public void authorizeRequestSync(HttpPipelineCallContext context) {
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
*/
@Override
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
AccessToken token = tokenService.getTokenSync(tokenRequestContext);
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
     * @return true if the challenge was handled and the request should be retried, false otherwise.
*/
@Override
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return false;
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
setAuthorizationHeaderSync(context, new ContainerRegistryTokenRequestContext(serviceName, scope));
return true;
}
return false;
}
}
private Map<String, String> parseBearerChallenge(String header) {
if (header.startsWith(BEARER)) {
String challengeParams = header.substring(BEARER.length());
Matcher matcher2 = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams);
Map<String, String> challengeParameters = new HashMap<>();
while (matcher2.find()) {
challengeParameters.put(matcher2.group(1), matcher2.group(2));
}
return challengeParameters;
}
return null;
}
}
|
class ContainerRegistryCredentialsPolicy extends BearerTokenAuthenticationPolicy {
private static final ClientLogger LOGGER = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
private static final String BEARER = "Bearer";
public static final Pattern AUTHENTICATION_CHALLENGE_PARAMS_PATTERN =
Pattern.compile("(?:(\\w+)=\"([^\"\"]*)\")+");
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String SCOPES_PARAMETER = "scope";
public static final String SERVICE_PARAMETER = "service";
public static final String AUTHORIZATION = "Authorization";
private final ContainerRegistryTokenService tokenService;
private final ClientLogger logger = new ClientLogger(ContainerRegistryCredentialsPolicy.class);
/**
* Creates an instance of ContainerRegistryCredentialsPolicy.
*
* @param tokenService the token generation service.
*/
public ContainerRegistryCredentialsPolicy(ContainerRegistryTokenService tokenService) {
super(tokenService);
this.tokenService = tokenService;
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
* @return A {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> authorizeRequest(HttpPipelineCallContext context) {
return Mono.empty();
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
     * @param tokenRequestContext the token request context to be used for token acquisition.
* @return a {@link Mono} containing {@link Void}
*/
@Override
public Mono<Void> setAuthorizationHeader(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
return tokenService.getToken(tokenRequestContext)
.flatMap((token) -> {
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
return Mono.empty();
});
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
if ("http".equals(context.getHttpRequest().getUrl().getProtocol())) {
return Mono.error(new RuntimeException("token credentials require a URL using the HTTPS protocol scheme"));
}
if (context.getHttpRequest().getBody() != null) {
context.getHttpRequest().setBody(context.getHttpRequest().getBody().map(buffer -> buffer.duplicate()));
}
HttpPipelineNextPolicy nextPolicy = next.clone();
return authorizeRequest(context)
.then(Mono.defer(() -> next.process()))
.flatMap(httpResponse -> {
String authHeader = httpResponse.getHeaderValue(WWW_AUTHENTICATE);
if (httpResponse.getStatusCode() == 401 && authHeader != null) {
return authorizeRequestOnChallenge(context, httpResponse).flatMap(retry -> {
if (retry) {
return nextPolicy.process()
.doFinally(ignored -> {
httpResponse.close();
});
} else {
return Mono.just(httpResponse);
}
});
}
return Mono.just(httpResponse);
});
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
* @return A {@link Mono} containing {@link Boolean}
*/
@Override
public Mono<Boolean> authorizeRequestOnChallenge(HttpPipelineCallContext context, HttpResponse response) {
return Mono.defer(() -> {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return Mono.just(false);
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
return setAuthorizationHeader(context, new ContainerRegistryTokenRequestContext(serviceName, scope))
.then(Mono.defer(() -> Mono.just(true)));
}
return Mono.just(false);
}
});
}
/**
* Executed before sending the initial request and authenticates the request.
*
* @param context The request context.
*/
@Override
public void authorizeRequestSync(HttpPipelineCallContext context) {
}
/**
* Authorizes the request with the bearer token acquired using the specified {@code tokenRequestContext}
*
* @param context the HTTP pipeline context.
* @param tokenRequestContext the token request context to be used for token acquisition.
*/
@Override
public void setAuthorizationHeaderSync(HttpPipelineCallContext context, TokenRequestContext tokenRequestContext) {
AccessToken token = tokenService.getTokenSync(tokenRequestContext);
context.getHttpRequest().getHeaders().set(AUTHORIZATION, BEARER + " " + token.getToken());
}
/**
* Handles the authentication challenge in the event a 401 response with a WWW-Authenticate authentication
* challenge header is received after the initial request and returns appropriate {@link TokenRequestContext} to
* be used for re-authentication.
*
* @param context The request context.
* @param response The Http Response containing the authentication challenge header.
     * @return true if the challenge was handled and the request should be retried, false otherwise.
*/
@Override
public boolean authorizeRequestOnChallengeSync(HttpPipelineCallContext context, HttpResponse response) {
String authHeader = response.getHeaderValue(WWW_AUTHENTICATE);
if (!(response.getStatusCode() == 401 && authHeader != null)) {
return false;
} else {
Map<String, String> extractedChallengeParams = parseBearerChallenge(authHeader);
if (extractedChallengeParams != null && extractedChallengeParams.containsKey(SCOPES_PARAMETER)) {
String scope = extractedChallengeParams.get(SCOPES_PARAMETER);
String serviceName = extractedChallengeParams.get(SERVICE_PARAMETER);
setAuthorizationHeaderSync(context, new ContainerRegistryTokenRequestContext(serviceName, scope));
return true;
}
return false;
}
}
private Map<String, String> parseBearerChallenge(String header) {
if (header.startsWith(BEARER)) {
String challengeParams = header.substring(BEARER.length());
Matcher matcher2 = AUTHENTICATION_CHALLENGE_PARAMS_PATTERN.matcher(challengeParams);
Map<String, String> challengeParameters = new HashMap<>();
while (matcher2.find()) {
challengeParameters.put(matcher2.group(1), matcher2.group(2));
}
return challengeParameters;
}
return null;
}
}
|
|
There's both a default and an instance1 in there somewhere, but yes, we need more tests when we do the orchestration part at least (though perhaps not in the API).
|
public void multiple_endpoints() {
tester.computeVersionStatus();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
ApplicationId id = createTenantAndApplication();
long projectId = 1;
MultiPartStreamer deployData = createApplicationDeployData(Optional.empty(), false);
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 100);
for (var job : List.of(JobType.productionUsWest1, JobType.productionUsEast3, JobType.productionEuWest1)) {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/" + job.zone(SystemName.main).region().value() + "/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(job)
.application(id)
.projectId(projectId)
.submit();
}
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation?endpointId=default", GET)
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"IN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation?endpointId=eu", GET)
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation?endpointId=eu", GET)
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"IN\"}}",
200);
}
|
.instances("instance1")
|
public void multiple_endpoints() {
tester.computeVersionStatus();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-west-1")
.region("us-east-3")
.region("eu-west-1")
.endpoint("eu", "default", "eu-west-1")
.endpoint("default", "default", "us-west-1", "us-east-3")
.build();
ApplicationId id = createTenantAndApplication();
long projectId = 1;
MultiPartStreamer deployData = createApplicationDeployData(Optional.empty(), false);
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 100);
for (var job : List.of(JobType.productionUsWest1, JobType.productionUsEast3, JobType.productionEuWest1)) {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/" + job.zone(SystemName.main).region().value() + "/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(job)
.application(id)
.projectId(projectId)
.submit();
}
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-west-1"));
setZoneInRotation("rotation-fqdn-2", ZoneId.from("prod", "us-east-3"));
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "eu-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"application 'tenant1.application1.instance1' has multiple rotations. Query parameter 'endpointId' must be given\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation?endpointId=default", GET)
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"IN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation?endpointId=eu", GET)
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"UNKNOWN\"}}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/eu-west-1/global-rotation?endpointId=eu", GET)
.userIdentity(USER_ID),
"{\"bcpStatus\":{\"rotationStatus\":\"IN\"}}",
200);
}
|
class ApplicationApiTest extends ControllerContainerTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("dummy");
private static final ZoneId TEST_ZONE = ZoneId.from(Environment.test, RegionName.from("us-east-1"));
private static final ZoneId STAGING_ZONE = ZoneId.from(Environment.staging, RegionName.from("us-east-3"));
private ContainerControllerTester controllerTester;
private ContainerTester tester;
@Before
public void before() {
controllerTester = new ContainerControllerTester(container, responseFiles);
tester = controllerTester.containerTester();
}
@Test
public void testApplicationApi() {
tester.computeVersionStatus();
tester.controller().jobController().setRunner(__ -> { });
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user", GET).userIdentity(USER_ID),
new File("user.json"));
tester.assertResponse(request("/application/v4/user", PUT).userIdentity(USER_ID),
"{\"message\":\"Created user 'by-myuser'\"}");
tester.assertResponse(request("/application/v4/user", GET).userIdentity(USER_ID),
new File("user-which-exists.json"));
tester.assertResponse(request("/application/v4/tenant/by-myuser", DELETE).userIdentity(USER_ID),
"{\"tenant\":\"by-myuser\",\"type\":\"USER\",\"applications\":[]}");
tester.assertResponse(request("/application/v4/tenant/", GET).userIdentity(USER_ID),
new File("tenant-list.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant-with-contact-info.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(entity::data)))
.userIdentity(USER_ID),
new File("deploy-result.json"));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
long screwdriverProjectId = 123;
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
controllerTester.jobCompletion(JobType.component)
.application(id)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackageInstance1)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/", POST)
.data(createApplicationDeployData(Optional.empty(), false))
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in test.us-east-1\"}");
controllerTester.jobCompletion(JobType.systemTest)
.application(id)
.projectId(screwdriverProjectId)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/", POST)
.data(createApplicationDeployData(Optional.empty(), false))
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in staging.us-east-3\"}");
controllerTester.jobCompletion(JobType.stagingTest)
.application(id)
.projectId(screwdriverProjectId)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/", POST)
.data(createApplicationDeployData(Optional.empty(), false))
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsCentral1)
.application(id)
.projectId(screwdriverProjectId)
.unsuccessful()
.submit();
entity = createApplicationDeployData(Optional.empty(),
Optional.of(ApplicationVersion.from(BuildJob.defaultSourceRevision,
BuildJob.defaultBuildNumber - 1)),
true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"No application package found for tenant1.application1.instance1 with version 1.0.41-commit1\"}",
400);
entity = createApplicationDeployData(Optional.empty(),
Optional.of(ApplicationVersion.from(BuildJob.defaultSourceRevision,
BuildJob.defaultBuildNumber)),
true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.environment(Environment.prod)
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference-2.json"));
ApplicationId app2 = ApplicationId.from("tenant2", "application2", "default");
long screwdriverProjectId2 = 456;
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(app2.application().value()));
controllerTester.controller().applications().deploymentTrigger().triggerChange(TenantAndApplicationId.from(app2), Change.of(Version.fromString("7.0")));
controllerTester.jobCompletion(JobType.component)
.application(app2)
.projectId(screwdriverProjectId2)
.uploadArtifact(applicationPackage)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----\"}"),
"{\"message\":\"Added deploy key -----BEGIN PUBLIC KEY-----\\n∠( ᐛ 」∠)_\\n-----END PUBLIC KEY-----\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----\"}"),
"{\"message\":\"Added deploy key -----BEGIN PUBLIC KEY-----\\n∠( ᐛ 」∠)_\\n-----END PUBLIC KEY-----\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"-----BEGIN PUBLIC KEY-----\\n∠( ᐛ 」∠)_\\n-----END PUBLIC KEY-----\"}"),
"{\"message\":\"Removed deploy key -----BEGIN PUBLIC KEY-----\\n∠( ᐛ 」∠)_\\n-----END PUBLIC KEY-----\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted application tenant2.application2\"}");
controllerTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
tester.computeVersionStatus();
setDeploymentMaintainedInfo(controllerTester);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(controllerTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("application1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-central-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.42-commit1' to 'no change' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for application 'tenant1.application1' at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-central-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-central-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart?hostname=host1", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"No node with the hostname host1 is known.\"}", 500);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackage, false)),
new File("deployment-job-accepted.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage)),
"{\"message\":\"Application package version: 1.0.43-d00d, source revision of repository 'repo', branch 'master' with commit 'd00d', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.environment(Environment.prod)
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.environment(Environment.prod)
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService)),
"{\"message\":\"Application package version: 1.0.44-d00d, source revision of repository 'repo', branch 'master' with commit 'd00d', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.45-d00d, source revision of repository 'repo', branch 'master' with commit 'd00d', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationId app1 = ApplicationId.from("tenant1", "application1", "instance1");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/jobreport", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(asJson(DeploymentJobs.JobReport.ofComponent(app1,
1234,
123,
Optional.empty(),
BuildJob.defaultSourceRevision))),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"" + app1 + " is set up to be deployed from internally," +
" and no longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
"to the old pipeline, please file a ticket at yo/vespa-support and request this.\"}",
400);
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 1 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Unregistered 'tenant1.application1.instance1' from internal deployment pipeline.\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/jobreport", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(asJson(DeploymentJobs.JobReport.ofComponent(app1,
1234,
123,
Optional.empty(),
BuildJob.defaultSourceRevision))),
"{\"message\":\"ok\"}");
byte[] data = new byte[0];
tester.assertResponse(request("/application/v4/user?user=new_user&domain=by", PUT)
.data(data)
.userIdentity(new UserId("new_user")),
new File("create-user-response.json"));
tester.assertResponse(request("/application/v4/user", GET)
.userIdentity(new UserId("other_user")),
"{\"user\":\"other_user\",\"tenants\":[],\"tenantExists\":false}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
}
private void addIssues(ContainerControllerTester tester, TenantAndApplicationId id) {
tester.controller().applications().lockApplicationOrThrow(id, application ->
tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
.withOwnershipIssueId(IssueId.from("321"))
.withOwner(User.from("owner-username"))));
}
@Test
public void testRotationOverride() {
tester.computeVersionStatus();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.build();
ApplicationId id = createTenantAndApplication();
long projectId = 1;
MultiPartStreamer deployData = createApplicationDeployData(Optional.of(applicationPackage), false);
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 100);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsWest1)
.application(id)
.projectId(projectId)
.submit();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-east-3\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-east-3\"}",
404);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
}
    @Test
public void testDeployDirectly() {
tester.computeVersionStatus();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"));
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId("application1"));
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/deploy", POST)
.data(entity)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
tester.upgradeSystem(tester.controller().versionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/proxy-host/environment/prod/region/us-central-1/instance/instance1/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-no-deployment.json"), 400);
}
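/** Verifies that deployments and job runs across multiple production zones are reported in a stable, sorted order. */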
@Test
public void testSortsDeploymentsAndJobs() {
tester.computeVersionStatus();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-east-3")
.build();
ApplicationId id = createTenantAndApplication();
long projectId = 1;
MultiPartStreamer deployData = createApplicationDeployData(Optional.empty(), false);
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 100);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsEast3)
.application(id)
.projectId(projectId)
.submit();
applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.build();
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 101);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsWest1)
.application(id)
.projectId(projectId)
.submit();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsEast3)
.application(id)
.projectId(projectId)
.submit();
setDeploymentMaintainedInfo(controllerTester);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("application-without-change-multiple-deployments.json"));
}
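/** Exercises the metering endpoint against a mocked metering client primed with fixed resource snapshots. */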
@Test
public void testMeteringResponses() {
MockMeteringClient mockMeteringClient = (MockMeteringClient) controllerTester.containerTester().serviceRegistry().meteringService();
ResourceAllocation currentSnapshot = new ResourceAllocation(1, 2, 3);
ResourceAllocation thisMonth = new ResourceAllocation(12, 24, 1000);
ResourceAllocation lastMonth = new ResourceAllocation(24, 48, 2000);
ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(123)),
new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(246)),
new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(492))));
mockMeteringClient.setMeteringInfo(new MeteringInfo(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application1-metering.json"));
}
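/** Walks through the main error responses: access denied, missing resources, invalid names, duplicate creation, and config server failures. */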
@Test
public void testErrorResponses() throws Exception {
tester.computeVersionStatus();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
tester.assertResponse(request("/application/v4/tenant/by-tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz tenant name cannot have prefix 'by-'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Application already exists\"}",
400);
ConfigServerMock configServer = serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, null));
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
new File("deploy-failure.json"), 400);
configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", ConfigServerException.ErrorCode.OUT_OF_CAPACITY, null));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
new File("deploy-out-of-capacity.json"), 400);
configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to activate application", ConfigServerException.ErrorCode.ACTIVATION_CONFLICT, null));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
new File("deploy-activation-conflict.json"), 409);
configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Internal server error", ConfigServerException.ErrorCode.INTERNAL_SERVER_ERROR, null));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
new File("deploy-internal-server-error.json"), 500);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete application 'tenant1.application1.instance1': Application not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty()));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"),
200);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
new File("application-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete application; more than one instance present: [tenant1.application1, tenant1.application1.instance1]\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted instance tenant1.application1.default\"}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
"{\"tenant\":\"tenant1\",\"type\":\"ATHENS\",\"athensDomain\":\"domain2\",\"property\":\"property1\",\"applications\":[]}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
}
@Test
public void deployment_fails_on_illegal_domain_in_deployment_spec() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.prod)
.region("us-west-1")
.build();
long screwdriverProjectId = 123;
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"), true);
Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(applicationPackage, false))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
}
@Test
public void deployment_succeeds_when_correct_domain_is_used() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.prod)
.region("us-west-1")
.build();
long screwdriverProjectId = 123;
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(applicationPackage, false))
.screwdriverIdentity(screwdriverId),
new File("deploy-result.json"));
}
@Test
public void deployment_fails_for_personal_tenants_when_athenzdomain_specified_and_user_not_admin() {
tester.computeVersionStatus();
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
byte[] data = new byte[0];
tester.assertResponse(request("/application/v4/user?user=new_user&domain=by", PUT)
.data(data)
.userIdentity(userId),
new File("create-user-response.json"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.dev)
.region("us-west-1")
.build();
String expectedResult="{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.new-user is not allowed to launch services in Athenz domain domain1. Please reach out to the domain admin.\"}";
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/by-new-user/application/application1/environment/dev/region/us-west-1/instance/default", POST)
.data(entity)
.userIdentity(userId),
expectedResult,
400);
}
@Test
public void deployment_succeeds_for_personal_tenants_when_user_is_tenant_admin() {
tester.computeVersionStatus();
UserId tenantAdmin = new UserId("new_user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
byte[] data = new byte[0];
tester.assertResponse(request("/application/v4/user?user=new_user&domain=by", PUT)
.data(data)
.userIdentity(tenantAdmin),
new File("create-user-response.json"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.dev)
.region("us-west-1")
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/by-new-user/application/application1/environment/dev/region/us-west-1/instance/default", POST)
.data(entity)
.userIdentity(tenantAdmin),
new File("deploy-result.json"));
}
@Test
public void deployment_fails_when_athenz_service_cannot_be_launched() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.prod)
.region("us-west-1")
.build();
long screwdriverProjectId = 123;
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), false);
Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
String expectedResult="{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(applicationPackage, false))
.screwdriverIdentity(screwdriverId),
expectedResult,
400);
}
@Test
public void redeployment_succeeds_when_not_specifying_versions_or_application_package() {
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.computeVersionStatus();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.prod)
.region("us-west-1")
.build();
long screwdriverProjectId = 123;
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(applicationPackage, false))
.screwdriverIdentity(screwdriverId),
new File("deploy-result.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(Optional.empty(), true))
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
@Test
public void testJobStatusReporting() {
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.computeVersionStatus();
long projectId = 1;
Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.build();
Version vespaVersion = new Version("6.1");
BuildJob job = new BuildJob(report -> notifyCompletion(report, controllerTester), controllerTester.containerTester().serviceRegistry().artifactRepositoryMock())
.application(app)
.projectId(projectId);
job.type(JobType.component).uploadArtifact(applicationPackage).submit();
controllerTester.deploy(app.id().defaultInstance(), applicationPackage, TEST_ZONE);
job.type(JobType.systemTest).submit();
Request request = request("/application/v4/tenant/tenant1/application/application1/jobreport", POST)
.data(asJson(job.type(JobType.systemTest).report()))
.userIdentity(HOSTED_VESPA_OPERATOR)
.get();
tester.assertResponse(request, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Notified of completion " +
"of system-test for tenant1.application1, but that has not been triggered; last was " +
controllerTester.controller().applications().requireInstance(app.id().defaultInstance()).deploymentJobs().jobStatus().get(JobType.systemTest).lastTriggered().get().at() + "\"}", 400);
request = request("/application/v4/tenant/tenant1/application/application1/jobreport", POST)
.data(asJson(job.type(JobType.productionUsEast3).report()))
.userIdentity(HOSTED_VESPA_OPERATOR)
.get();
tester.assertResponse(request, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Notified of completion " +
"of production-us-east-3 for tenant1.application1, but that has not been triggered; last was never\"}",
400);
JobStatus recordedStatus =
tester.controller().applications().getInstance(app.id().defaultInstance()).get().deploymentJobs().jobStatus().get(JobType.component);
assertNotNull("Status was recorded", recordedStatus);
assertTrue(recordedStatus.isSuccess());
assertEquals(vespaVersion, recordedStatus.lastCompleted().get().platform());
recordedStatus =
tester.controller().applications().getInstance(app.id().defaultInstance()).get().deploymentJobs().jobStatus().get(JobType.productionApNortheast2);
assertNull("Status of never-triggered jobs is empty", recordedStatus);
assertTrue("All jobs have been run", tester.controller().applications().deploymentTrigger().jobsToRun().isEmpty());
}
@Test
public void testJobStatusReportingOutOfCapacity() {
controllerTester.containerTester().computeVersionStatus();
long projectId = 1;
Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.build();
BuildJob job = new BuildJob(report -> notifyCompletion(report, controllerTester), controllerTester.containerTester().serviceRegistry().artifactRepositoryMock())
.application(app)
.projectId(projectId);
job.type(JobType.component).uploadArtifact(applicationPackage).submit();
controllerTester.deploy(app.id().defaultInstance(), applicationPackage, TEST_ZONE);
job.type(JobType.systemTest).submit();
controllerTester.deploy(app.id().defaultInstance(), applicationPackage, STAGING_ZONE);
job.type(JobType.stagingTest).error(DeploymentJobs.JobError.outOfCapacity).submit();
JobStatus jobStatus = tester.controller().applications().getInstance(app.id().defaultInstance()).get()
.deploymentJobs()
.jobStatus()
.get(JobType.stagingTest);
assertFalse(jobStatus.isSuccess());
assertEquals(DeploymentJobs.JobError.outOfCapacity, jobStatus.jobError().get());
}
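/** Verifies that routing policies written to curator show up in application and deployment responses. */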
@Test
public void applicationWithRoutingPolicy() {
Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build();
controllerTester.deployCompletely(app, applicationPackage, 1, false);
RoutingPolicy policy = new RoutingPolicy(app.id().defaultInstance(),
ClusterSpec.Id.from("default"),
ZoneId.from(Environment.prod, RegionName.from("us-west-1")),
HostName.from("lb-0-canonical-name"),
Optional.of("dns-zone-1"), Set.of(EndpointId.of("c0")));
tester.controller().curator().writeRoutingPolicies(app.id().defaultInstance(), Set.of(policy));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
new File("application-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
}
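/** Posts a job completion report to the jobreport handler and triggers any jobs that became ready as a result. */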
private void notifyCompletion(DeploymentJobs.JobReport report, ContainerControllerTester tester) {
assertResponse(request("/application/v4/tenant/tenant1/application/application1/jobreport", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data(asJson(report))
.get(),
200, "{\"message\":\"ok\"}");
tester.controller().applications().deploymentTrigger().triggerReadyJobs();
}
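/**
 * Serializes a job report to the JSON body expected by the jobreport handler. Illustrative shape:
 * {"projectId":1,"jobName":"system-test","buildNumber":42,"sourceRevision":{"repository":"repo","branch":"master","commit":"d00d"},
 *  "tenant":"tenant1","application":"application1","instance":"default"}
 */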
private static byte[] asJson(DeploymentJobs.JobReport report) {
Slime slime = new Slime();
Cursor cursor = slime.setObject();
cursor.setLong("projectId", report.projectId());
cursor.setString("jobName", report.jobType().jobName());
cursor.setLong("buildNumber", report.buildNumber());
report.jobError().ifPresent(jobError -> cursor.setString("jobError", jobError.name()));
report.version().flatMap(ApplicationVersion::source).ifPresent(sr -> {
Cursor sourceRevision = cursor.setObject("sourceRevision");
sourceRevision.setString("repository", sr.repository());
sourceRevision.setString("branch", sr.branch());
sourceRevision.setString("commit", sr.commit());
});
cursor.setString("tenant", report.applicationId().tenant().value());
cursor.setString("application", report.applicationId().application().value());
cursor.setString("instance", report.applicationId().instance().value());
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private MultiPartStreamer createApplicationDeployData(ApplicationPackage applicationPackage, boolean deployDirectly) {
return createApplicationDeployData(Optional.of(applicationPackage), deployDirectly);
}
private MultiPartStreamer createApplicationDeployData(Optional<ApplicationPackage> applicationPackage, boolean deployDirectly) {
return createApplicationDeployData(applicationPackage, Optional.empty(), deployDirectly);
}
private MultiPartStreamer createApplicationDeployData(Optional<ApplicationPackage> applicationPackage,
Optional<ApplicationVersion> applicationVersion, boolean deployDirectly) {
MultiPartStreamer streamer = new MultiPartStreamer();
streamer.addJson("deployOptions", deployOptions(deployDirectly, applicationVersion));
applicationPackage.ifPresent(ap -> streamer.addBytes("applicationZip", ap.zippedContent()));
return streamer;
}
private MultiPartStreamer createApplicationSubmissionData(ApplicationPackage applicationPackage) {
return new MultiPartStreamer().addJson(EnvironmentResource.SUBMIT_OPTIONS, "{\"repository\":\"repo\",\"branch\":\"master\",\"commit\":\"d00d\",\"authorEmail\":\"a@b\"}")
.addBytes(EnvironmentResource.APPLICATION_ZIP, applicationPackage.zippedContent())
.addBytes(EnvironmentResource.APPLICATION_TEST_ZIP, "content".getBytes());
}
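/**
 * Builds the deployOptions JSON part of a deployment request. Illustrative output:
 * {"vespaVersion":null,"ignoreValidationErrors":false,"deployDirectly":true,
 *  "buildNumber":42,"sourceRevision":{"repository":"repo","branch":"master","commit":"d00d"}}
 */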
private String deployOptions(boolean deployDirectly, Optional<ApplicationVersion> applicationVersion) {
return "{\"vespaVersion\":null," +
"\"ignoreValidationErrors\":false," +
"\"deployDirectly\":" + deployDirectly +
applicationVersion.map(version ->
"," +
"\"buildNumber\":" + version.buildNumber().getAsLong() + "," +
"\"sourceRevision\":{" +
"\"repository\":\"" + version.source().get().repository() + "\"," +
"\"branch\":\"" + version.source().get().branch() + "\"," +
"\"commit\":\"" + version.source().get().commit() + "\"" +
"}"
).orElse("") +
"}";
}
/** Creates a request builder for the given path and method; identity and payload are added via the builder. */
private RequestBuilder request(String path, Request.Method method) {
return new RequestBuilder(path, method);
}
/**
* In production this happens outside hosted Vespa, so there is no API for it and we need to reach down into the
* mock setup to replicate the action.
*/
private void createAthenzDomainWithAdmin(AthenzDomain domain, UserId userId) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzDbMock.Domain domainMock = mock.getSetup().getOrCreateDomain(domain);
domainMock.markAsVespaTenant();
domainMock.admin(AthenzUser.fromUserId(userId.id()));
}
/**
* Mock Athenz service identity configuration. Simulates whether the config server is allowed to launch the service.
*/
private void configureAthenzIdentity(com.yahoo.vespa.athenz.api.AthenzService service, boolean allowLaunch) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzDbMock.Domain domainMock = mock.getSetup().domains.computeIfAbsent(service.getDomain(), AthenzDbMock.Domain::new);
domainMock.services.put(service.getName(), new AthenzDbMock.Service(allowLaunch));
}
/**
* In production this happens outside hosted Vespa, so there is no API for it and we need to reach down into the
* mock setup to replicate the action.
*/
private void addScrewdriverUserToDeployRole(ScrewdriverId screwdriverId,
AthenzDomain domain,
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId applicationId) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzIdentity screwdriverIdentity = HostedAthenzIdentities.from(screwdriverId);
AthenzDbMock.Application athenzApplication = mock.getSetup().domains.get(domain).applications.get(applicationId);
athenzApplication.addRoleMember(ApplicationAction.deploy, screwdriverIdentity);
}
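/** Creates tenant1 with application1.instance1 and grants the Screwdriver deploy role; returns the instance id. */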
private ApplicationId createTenantAndApplication() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"));
addScrewdriverUserToDeployRole(SCREWDRIVER_ID, ATHENZ_TENANT_DOMAIN,
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId("application1"));
return ApplicationId.from("tenant1", "application1", "instance1");
}
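/** Submits a new revision for the given build number and runs it through the system and staging test jobs. */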
private void startAndTestChange(ContainerControllerTester controllerTester, ApplicationId application,
long projectId, ApplicationPackage applicationPackage,
MultiPartStreamer deployData, long buildNumber) {
ContainerTester tester = controllerTester.containerTester();
controllerTester.containerTester().serviceRegistry().artifactRepositoryMock()
.put(application, applicationPackage,"1.0." + buildNumber + "-commit1");
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(projectId)
.buildNumber(buildNumber)
.submit();
String testPath = String.format("/application/v4/tenant/%s/application/%s/instance/%s/environment/test/region/us-east-1",
application.tenant().value(), application.application().value(), application.instance().value());
tester.assertResponse(request(testPath, POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
tester.assertResponse(request(testPath, DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated " + application + " in test.us-east-1\"}");
controllerTester.jobCompletion(JobType.systemTest)
.application(application)
.projectId(projectId)
.submit();
String stagingPath = String.format("/application/v4/tenant/%s/application/%s/instance/%s/environment/staging/region/us-east-3",
application.tenant().value(), application.application().value(), application.instance().value());
tester.assertResponse(request(stagingPath, POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
tester.assertResponse(request(stagingPath, DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated " + application + " in staging.us-east-3\"}");
controllerTester.jobCompletion(JobType.stagingTest)
.application(application)
.projectId(projectId)
.submit();
}
/**
* Cluster info, utilization, and application and deployment metrics are maintained asynchronously by maintainers.
*
* This sets those values as if the maintainers had already run.
*/
private void setDeploymentMaintainedInfo(ContainerControllerTester controllerTester) {
for (Application application : controllerTester.controller().applications().asList()) {
controllerTester.controller().applications().lockApplicationOrThrow(application.id(), lockedApplication -> {
lockedApplication = lockedApplication.with(new ApplicationMetrics(0.5, 0.7));
for (Instance instance : application.instances().values()) {
for (Deployment deployment : instance.deployments().values()) {
Map<ClusterSpec.Id, ClusterInfo> clusterInfo = new HashMap<>();
List<String> hostnames = new ArrayList<>();
hostnames.add("host1");
hostnames.add("host2");
clusterInfo.put(ClusterSpec.Id.from("cluster1"),
new ClusterInfo("flavor1", 37, 2, 4, 50,
ClusterSpec.Type.content, hostnames));
Map<ClusterSpec.Id, ClusterUtilization> clusterUtils = new HashMap<>();
clusterUtils.put(ClusterSpec.Id.from("cluster1"), new ClusterUtilization(0.3, 0.6, 0.4, 0.3));
DeploymentMetrics metrics = new DeploymentMetrics(1, 2, 3, 4, 5,
Optional.of(Instant.ofEpochMilli(123123)), Map.of());
lockedApplication = lockedApplication.with(instance.name(),
lockedInstance -> lockedInstance.withClusterInfo(deployment.zone(), clusterInfo)
.withClusterUtilization(deployment.zone(), clusterUtils)
.with(deployment.zone(), metrics)
.recordActivityAt(Instant.parse("2018-06-01T10:15:30.00Z"), deployment.zone()));
}
controllerTester.controller().applications().store(lockedApplication);
}
});
}
}
private ServiceRegistryMock serviceRegistry() {
return (ServiceRegistryMock) tester.container().components().getComponent(ServiceRegistryMock.class.getName());
}
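/** Marks the zone as IN for the given rotation in the mocked global routing service, then refreshes rotation status. */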
private void setZoneInRotation(String rotationName, ZoneId zone) {
serviceRegistry().globalRoutingServiceMock().setStatus(rotationName, zone, com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus.IN);
new RotationStatusUpdater(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator())).run();
}
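/** Reads rotation health from the mocked global routing service, mapping every reported zone to RotationState.in. */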
private RotationStatus rotationStatus(Instance instance) {
return controllerTester.controller().applications().rotationRepository().getRotation(instance)
.map(rotation -> {
var rotationStatus = controllerTester.controller().serviceRegistry().globalRoutingService().getHealthStatus(rotation.name());
var statusMap = new LinkedHashMap<ZoneId, RotationState>();
rotationStatus.forEach((zone, status) -> statusMap.put(zone, RotationState.in));
return RotationStatus.from(Map.of(rotation.id(), statusMap));
})
.orElse(RotationStatus.EMPTY);
}
private void updateContactInformation() {
Contact contact = new Contact(URI.create("www.contacts.tld/1234"),
URI.create("www.properties.tld/1234"),
URI.create("www.issues.tld/1234"),
List.of(List.of("alice"), List.of("bob")), "queue", Optional.empty());
tester.controller().tenants().lockIfPresent(TenantName.from("tenant2"),
LockedTenant.Athenz.class,
lockedTenant -> tester.controller().tenants().store(lockedTenant.with(contact)));
}
private void registerContact(long propertyId) {
PropertyId p = new PropertyId(String.valueOf(propertyId));
serviceRegistry().contactRetrieverMock().addContact(p, new Contact(URI.create("www.issues.tld/" + p.id()),
URI.create("www.contacts.tld/" + p.id()),
URI.create("www.properties.tld/" + p.id()),
List.of(Collections.singletonList("alice"),
Collections.singletonList("bob")),
"queue", Optional.empty()));
}
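/**
 * Fluent builder for container requests. Typical usage (illustrative):
 * request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID).data("{...}").oktaAccessToken(OKTA_AT)
 */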
private static class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
}
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
}
}
class ApplicationApiTest extends ControllerContainerTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/";
private static final String pemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
"z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END PUBLIC KEY-----\n";
private static final String quotedPemPublicKey = pemPublicKey.replaceAll("\\n", "\\\\n");
private static final ApplicationPackage applicationPackageDefault = new ApplicationPackageBuilder()
.instances("default")
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final ApplicationPackage applicationPackageInstance1 = new ApplicationPackageBuilder()
.instances("instance1")
.environment(Environment.prod)
.globalServiceId("foo")
.region("us-central-1")
.region("us-east-3")
.region("us-west-1")
.blockChange(false, true, "mon-fri", "0-8", "UTC")
.build();
private static final AthenzDomain ATHENZ_TENANT_DOMAIN = new AthenzDomain("domain1");
private static final AthenzDomain ATHENZ_TENANT_DOMAIN_2 = new AthenzDomain("domain2");
private static final ScrewdriverId SCREWDRIVER_ID = new ScrewdriverId("12345");
private static final UserId USER_ID = new UserId("myuser");
private static final UserId HOSTED_VESPA_OPERATOR = new UserId("johnoperator");
private static final OktaAccessToken OKTA_AT = new OktaAccessToken("dummy");
private static final ZoneId TEST_ZONE = ZoneId.from(Environment.test, RegionName.from("us-east-1"));
private static final ZoneId STAGING_ZONE = ZoneId.from(Environment.staging, RegionName.from("us-east-3"));
private ContainerControllerTester controllerTester;
private ContainerTester tester;
@Before
public void before() {
controllerTester = new ContainerControllerTester(container, responseFiles);
tester = controllerTester.containerTester();
}
@Test
public void testApplicationApi() {
tester.computeVersionStatus();
tester.controller().jobController().setRunner(__ -> { });
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
new File("root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/user", GET).userIdentity(USER_ID),
new File("user.json"));
tester.assertResponse(request("/application/v4/user", PUT).userIdentity(USER_ID),
"{\"message\":\"Created user 'by-myuser'\"}");
tester.assertResponse(request("/application/v4/user", GET).userIdentity(USER_ID),
new File("user-which-exists.json"));
tester.assertResponse(request("/application/v4/tenant/by-myuser", DELETE).userIdentity(USER_ID),
"{\"tenant\":\"by-myuser\",\"type\":\"USER\",\"applications\":[]}");
tester.assertResponse(request("/application/v4/tenant/", GET).userIdentity(USER_ID),
new File("tenant-list.json"));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN_2, USER_ID);
registerContact(1234);
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property2\", \"propertyId\":\"1234\"}"),
new File("tenant-without-applications-with-id.json"));
updateContactInformation();
tester.assertResponse(request("/application/v4/tenant/tenant2", GET).userIdentity(USER_ID),
new File("tenant-with-contact-info.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/", GET).userIdentity(USER_ID),
new File("application-list.json"));
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(entity::data)))
.userIdentity(USER_ID),
new File("deploy-result.json"));
ApplicationId id = ApplicationId.from("tenant1", "application1", "instance1");
long screwdriverProjectId = 123;
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
controllerTester.jobCompletion(JobType.component)
.application(id)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackageInstance1)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1/", POST)
.data(createApplicationDeployData(Optional.empty(), false))
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in test.us-east-1\"}");
controllerTester.jobCompletion(JobType.systemTest)
.application(id)
.projectId(screwdriverProjectId)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1/", POST)
.data(createApplicationDeployData(Optional.empty(), false))
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in staging.us-east-3\"}");
controllerTester.jobCompletion(JobType.stagingTest)
.application(id)
.projectId(screwdriverProjectId)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/", POST)
.data(createApplicationDeployData(Optional.empty(), false))
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsCentral1)
.application(id)
.projectId(screwdriverProjectId)
.unsuccessful()
.submit();
entity = createApplicationDeployData(Optional.empty(),
Optional.of(ApplicationVersion.from(BuildJob.defaultSourceRevision,
BuildJob.defaultBuildNumber - 1)),
true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"No application package found for tenant1.application1.instance1 with version 1.0.41-commit1\"}",
400);
entity = createApplicationDeployData(Optional.empty(),
Optional.of(ApplicationVersion.from(BuildJob.defaultSourceRevision,
BuildJob.defaultBuildNumber)),
true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/", POST)
.data(entity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.environment(Environment.prod)
.region("us-west-1")
.build();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference-2.json"));
ApplicationId app2 = ApplicationId.from("tenant2", "application2", "default");
long screwdriverProjectId2 = 456;
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN_2,
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(app2.application().value()));
controllerTester.controller().applications().deploymentTrigger().triggerChange(TenantAndApplicationId.from(app2), Change.of(Version.fromString("7.0")));
controllerTester.jobCompletion(JobType.component)
.application(app2)
.projectId(screwdriverProjectId2)
.uploadArtifact(applicationPackage)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":7}"),
"{\"message\":\"Set major version to 7\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", POST)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[\"-----BEGIN PUBLIC KEY-----\\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\\nz/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\\n-----END PUBLIC KEY-----\\n\"]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"" + pemPublicKey + "\"}"),
"{\"message\":\"Added deploy key " + quotedPemPublicKey + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2-with-patches.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
.userIdentity(USER_ID)
.data("{\"majorVersion\":null}"),
"{\"message\":\"Set major version to empty\"}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/key", DELETE)
.userIdentity(USER_ID)
.data("{\"key\":\"" + pemPublicKey + "\"}"),
"{\"keys\":[]}");
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted application tenant2.application2\"}");
controllerTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
tester.computeVersionStatus();
setDeploymentMaintainedInfo(controllerTester);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-central-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("application.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("deployment.json"));
addIssues(controllerTester, TenantAndApplicationId.from("tenant1", "application1"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("deployment"),
new File("recursive-root.json"));
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
.recursive("tenant"),
new File("recursive-until-tenant-root.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("tenant1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID)
.recursive("true"),
new File("application1-recursive.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/nodes", GET)
.userIdentity(USER_ID),
new File("application-nodes.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application1/environment/dev/region/us-central-1/instance/default/logs?from=1233&to=3214", GET)
.userIdentity(USER_ID),
"INFO - All good");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Changed deployment from 'application change to 1.0.42-commit1' to 'no change' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
"{\"message\":\"No deployment in progress for application 'tenant1.application1' at this time\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin")));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
"{\"message\":\"Triggered pin to 6.1 for tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1/pause", POST)
.userIdentity(USER_ID),
"{\"message\":\"production-us-west-1 for tenant1.application1.instance1 paused for " + DeploymentTrigger.maxPause + "\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-west-1", POST)
.userIdentity(USER_ID),
"{\"message\":\"Triggered production-us-west-1 for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-central-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in staging.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-central-1/instance/instance1/restart", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in test.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-central-1/instance/instance1/restart", POST)
.userIdentity(USER_ID),
"{\"message\":\"Requested restart of tenant1.application1.instance1 in dev.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart?hostname=host1", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"No node with the hostname host1 is known.\"}", 500);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/suspended", GET)
.userIdentity(USER_ID),
new File("suspended.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service", GET)
.userIdentity(USER_ID),
new File("services.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/service/storagenode-awe3slno6mmq2fye191y324jl/state/v1/", GET)
.userIdentity(USER_ID),
new File("service.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("delete-with-active-deployments.json"), 400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/production-us-central-1/test-config", GET)
.userIdentity(USER_ID),
new File("test-config.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in dev.us-west-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1", DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated tenant1.application1.instance1 in prod.us-central-1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploy/dev-us-east-1", POST)
.userIdentity(USER_ID)
.data(createApplicationDeployData(applicationPackage, false)),
new File("deployment-job-accepted.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage)),
"{\"message\":\"Application package version: 1.0.43-d00d, source revision of repository 'repo', branch 'master' with commit 'd00d', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationPackage packageWithServiceForWrongDomain = new ApplicationPackageBuilder()
.instances("instance1")
.environment(Environment.prod)
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN_2.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN_2, "service"), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithServiceForWrongDomain)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [domain2] must match tenant domain: [domain1]\"}", 400);
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.instances("instance1")
.environment(Environment.prod)
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from(ATHENZ_TENANT_DOMAIN.getName()), AthenzService.from("service"))
.region("us-west-1")
.build();
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService)),
"{\"message\":\"Application package version: 1.0.44-d00d, source revision of repository 'repo', branch 'master' with commit 'd00d', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", "not/the/right/hash")
.data(createApplicationSubmissionData(packageWithService)),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Value of X-Content-Hash header does not match computed content hash\"}", 400);
MultiPartStreamer streamer = createApplicationSubmissionData(packageWithService);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
"{\"message\":\"Application package version: 1.0.45-d00d, source revision of repository 'repo', branch 'master' with commit 'd00d', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
ApplicationId app1 = ApplicationId.from("tenant1", "application1", "instance1");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/jobreport", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(asJson(DeploymentJobs.JobReport.ofComponent(app1,
1234,
123,
Optional.empty(),
BuildJob.defaultSourceRevision))),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"" + app1 + " is set up to be deployed from internally," +
" and no longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
"to the old pipeline, please file a ticket at yo/vespa-support and request this.\"}",
400);
assertEquals(2, tester.controller().applications().deploymentTrigger().triggerReadyJobs());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job", GET)
.userIdentity(USER_ID),
new File("jobs.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test", GET)
.userIdentity(USER_ID),
new File("system-test-job.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/1", GET)
.userIdentity(USER_ID),
new File("system-test-details.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test", DELETE)
.userIdentity(USER_ID),
"{\"message\":\"Aborting run 1 of staging-test for tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"message\":\"Unregistered 'tenant1.application1.instance1' from internal deployment pipeline.\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/jobreport", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(asJson(DeploymentJobs.JobReport.ofComponent(app1,
1234,
123,
Optional.empty(),
BuildJob.defaultSourceRevision))),
"{\"message\":\"ok\"}");
byte[] data = new byte[0];
tester.assertResponse(request("/application/v4/user?user=new_user&domain=by", PUT)
.data(data)
.userIdentity(new UserId("new_user")),
new File("create-user-response.json"));
tester.assertResponse(request("/application/v4/user", GET)
.userIdentity(new UserId("other_user")),
"{\"user\":\"other_user\",\"tenants\":[],\"tenantExists\":false}");
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS)
.userIdentity(USER_ID),
"");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE).userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
}
private void addIssues(ContainerControllerTester tester, TenantAndApplicationId id) {
tester.controller().applications().lockApplicationOrThrow(id, application ->
tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
.withOwnershipIssueId(IssueId.from("321"))
.withOwner(User.from("owner-username"))));
}
@Test
public void testRotationOverride() {
tester.computeVersionStatus();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.build();
ApplicationId id = createTenantAndApplication();
long projectId = 1;
MultiPartStreamer deployData = createApplicationDeployData(Optional.of(applicationPackage), false);
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 100);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsWest1)
.application(id)
.projectId(projectId)
.submit();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"tenant2.application2 not found\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/global-rotation", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-east-3\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-east-3\"}",
404);
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation", GET)
.userIdentity(USER_ID),
new File("global-rotation.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", GET)
.userIdentity(USER_ID),
new File("global-rotation-get.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-put.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/global-rotation/override", DELETE)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
new File("global-rotation-delete.json"));
}
@Test
public void testDeployDirectly() {
tester.computeVersionStatus();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.assertResponse(request("/application/v4/tenant/tenant1", POST).userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"));
addScrewdriverUserToDeployRole(SCREWDRIVER_ID,
ATHENZ_TENANT_DOMAIN,
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId("application1"));
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/deploy", POST)
.data(entity)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
MultiPartStreamer noAppEntity = createApplicationDeployData(Optional.empty(), true);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Deployment of system applications during a system upgrade is not allowed\"}",
400);
tester.upgradeSystem(tester.controller().versionStatus().controllerVersion().get().versionNumber());
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/routing/environment/prod/region/us-central-1/instance/default/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
tester.assertResponse(request("/application/v4/tenant/hosted-vespa/application/proxy-host/environment/prod/region/us-central-1/instance/instance1/deploy", POST)
.data(noAppEntity)
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-no-deployment.json"), 400);
}
@Test
public void testSortsDeploymentsAndJobs() {
tester.computeVersionStatus();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.region("us-east-3")
.build();
ApplicationId id = createTenantAndApplication();
long projectId = 1;
MultiPartStreamer deployData = createApplicationDeployData(Optional.empty(), false);
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 100);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsEast3)
.application(id)
.projectId(projectId)
.submit();
applicationPackage = new ApplicationPackageBuilder()
.instances("instance1")
.globalServiceId("foo")
.region("us-west-1")
.region("us-east-3")
.build();
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 101);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsWest1)
.application(id)
.projectId(projectId)
.submit();
setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/deploy", POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
controllerTester.jobCompletion(JobType.productionUsEast3)
.application(id)
.projectId(projectId)
.submit();
setDeploymentMaintainedInfo(controllerTester);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
.userIdentity(USER_ID),
new File("application-without-change-multiple-deployments.json"));
}
@Test
public void testMeteringResponses() {
MockMeteringClient mockMeteringClient = (MockMeteringClient) controllerTester.containerTester().serviceRegistry().meteringService();
ResourceAllocation currentSnapshot = new ResourceAllocation(1, 2, 3);
ResourceAllocation thisMonth = new ResourceAllocation(12, 24, 1000);
ResourceAllocation lastMonth = new ResourceAllocation(24, 48, 2000);
ApplicationId applicationId = ApplicationId.from("doesnotexist", "doesnotexist", "default");
Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, List.of(
new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(123)),
new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(246)),
new ResourceSnapshot(applicationId, 1, 2, 3, Instant.ofEpochMilli(492))));
mockMeteringClient.setMeteringInfo(new MeteringInfo(thisMonth, lastMonth, currentSnapshot, snapshotHistory));
tester.assertResponse(request("/application/v4/tenant/doesnotexist/application/doesnotexist/metering", GET)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application1-metering.json"));
}
@Test
public void testErrorResponses() throws Exception {
tester.computeVersionStatus();
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'tenant1' does not exist\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east/instance/default", GET)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"tenant1.application1 not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create tenant 'tenant2': The Athens domain 'domain1' is already connected to tenant 'tenant1'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'tenant1' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/my_tenant_2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"New tenant or application names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.\"}",
400);
tester.assertResponse(request("/application/v4/tenant/by-tenant2", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz tenant name cannot have prefix 'by-'\"}",
400);
tester.assertResponse(request("/application/v4/tenant/hosted-vespa", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'hosted-vespa' already exists\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.oktaAccessToken(OKTA_AT)
.userIdentity(USER_ID),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not create 'tenant1.application1.instance1': Instance already exists\"}",
400);
ConfigServerMock configServer = serviceRegistry().configServerMock();
configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, null));
MultiPartStreamer entity = createApplicationDeployData(applicationPackageInstance1, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
new File("deploy-failure.json"), 400);
configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", ConfigServerException.ErrorCode.OUT_OF_CAPACITY, null));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
new File("deploy-out-of-capacity.json"), 400);
configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to activate application", ConfigServerException.ErrorCode.ACTIVATION_CONFLICT, null));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
new File("deploy-activation-conflict.json"), 409);
configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Internal server error", ConfigServerException.ErrorCode.INTERNAL_SERVER_ERROR, null));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/instance1/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
new File("deploy-internal-server-error.json"), 500);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete tenant 'tenant1': This tenant has active applications\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted instance tenant1.application1.instance1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", DELETE)
.oktaAccessToken(OKTA_AT)
.userIdentity(USER_ID),
"{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not delete instance 'tenant1.application1.instance1': Instance not found\"}",
404);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(USER_ID),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.controller().curator().writeTenant(new AthenzTenant(TenantName.from("my_tenant"), ATHENZ_TENANT_DOMAIN,
new Property("property1"), Optional.empty(), Optional.empty()));
tester.assertResponse(request("/application/v4/tenant/my-tenant", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Tenant 'my-tenant' already exists\"}",
400);
}
@Test
public void testAuthorization() {
UserId authorizedUser = USER_ID;
UserId unauthorizedUser = new UserId("othertenant");
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"{\n \"message\" : \"Not authenticated\"\n}",
401);
tester.assertResponse(request("/application/v4/tenant/", GET)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}"),
"[]",
200);
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT)
.userIdentity(unauthorizedUser),
"{\"error-code\":\"FORBIDDEN\",\"message\":\"The user 'user.othertenant' is not admin in Athenz domain 'domain1'\"}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(unauthorizedUser)
.oktaAccessToken(OKTA_AT),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"),
200);
MultiPartStreamer entity = createApplicationDeployData(applicationPackageDefault, true);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
.data(entity)
.userIdentity(USER_ID),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(unauthorizedUser),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", POST)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
new File("application-reference-default.json"),
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Could not delete application; more than one instance present: [tenant1.application1, tenant1.application1.instance1]\"}",
400);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/default", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted instance tenant1.application1.default\"}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
"{\"message\":\"Deleted application tenant1.application1\"}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.userIdentity(unauthorizedUser),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
createAthenzDomainWithAdmin(new AthenzDomain("domain2"), USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", PUT)
.data("{\"athensDomain\":\"domain2\", \"property\":\"property1\"}")
.userIdentity(authorizedUser)
.oktaAccessToken(OKTA_AT),
"{\"tenant\":\"tenant1\",\"type\":\"ATHENS\",\"athensDomain\":\"domain2\",\"property\":\"property1\",\"applications\":[]}",
200);
tester.assertResponse(request("/application/v4/tenant/tenant1", DELETE)
.userIdentity(unauthorizedUser),
"{\n \"code\" : 403,\n \"message\" : \"Access denied\"\n}",
403);
}
@Test
public void deployment_fails_on_illegal_domain_in_deployment_spec() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("another.domain"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.prod)
.region("us-west-1")
.build();
long screwdriverProjectId = 123;
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"), true);
Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(applicationPackage, false))
.screwdriverIdentity(screwdriverId),
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Athenz domain in deployment.xml: [another.domain] must match tenant domain: [domain1]\"}",
400);
}
@Test
public void deployment_succeeds_when_correct_domain_is_used() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.prod)
.region("us-west-1")
.build();
long screwdriverProjectId = 123;
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(applicationPackage, false))
.screwdriverIdentity(screwdriverId),
new File("deploy-result.json"));
}
@Test
public void deployment_fails_for_personal_tenants_when_athenzdomain_specified_and_user_not_admin() {
tester.computeVersionStatus();
UserId tenantAdmin = new UserId("tenant-admin");
UserId userId = new UserId("new-user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
byte[] data = new byte[0];
tester.assertResponse(request("/application/v4/user?user=new_user&domain=by", PUT)
.data(data)
.userIdentity(userId),
new File("create-user-response.json"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.dev)
.region("us-west-1")
.build();
String expectedResult = "{\"error-code\":\"BAD_REQUEST\",\"message\":\"User user.new-user is not allowed to launch services in Athenz domain domain1. Please reach out to the domain admin.\"}";
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/by-new-user/application/application1/environment/dev/region/us-west-1/instance/default", POST)
.data(entity)
.userIdentity(userId),
expectedResult,
400);
}
@Test
public void deployment_succeeds_for_personal_tenants_when_user_is_tenant_admin() {
tester.computeVersionStatus();
UserId tenantAdmin = new UserId("new_user");
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, tenantAdmin);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
byte[] data = new byte[0];
tester.assertResponse(request("/application/v4/user?user=new_user&domain=by", PUT)
.data(data)
.userIdentity(tenantAdmin),
new File("create-user-response.json"));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.dev)
.region("us-west-1")
.build();
MultiPartStreamer entity = createApplicationDeployData(applicationPackage, true);
tester.assertResponse(request("/application/v4/tenant/by-new-user/application/application1/environment/dev/region/us-west-1/instance/default", POST)
.data(entity)
.userIdentity(tenantAdmin),
new File("deploy-result.json"));
}
@Test
public void deployment_fails_when_athenz_service_cannot_be_launched() {
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.prod)
.region("us-west-1")
.build();
long screwdriverProjectId = 123;
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), false);
Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
String expectedResult = "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Not allowed to launch Athenz service domain1.service\"}";
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(applicationPackage, false))
.screwdriverIdentity(screwdriverId),
expectedResult,
400);
}
@Test
public void redeployment_succeeds_when_not_specifying_versions_or_application_package() {
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.computeVersionStatus();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain1"), com.yahoo.config.provision.AthenzService.from("service"))
.environment(Environment.prod)
.region("us-west-1")
.build();
long screwdriverProjectId = 123;
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(applicationPackage, false))
.screwdriverIdentity(screwdriverId),
new File("deploy-result.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/", POST)
.data(createApplicationDeployData(Optional.empty(), true))
.userIdentity(HOSTED_VESPA_OPERATOR),
new File("deploy-result.json"));
}
@Test
public void testJobStatusReporting() {
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.computeVersionStatus();
long projectId = 1;
Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.build();
Version vespaVersion = new Version("6.1");
BuildJob job = new BuildJob(report -> notifyCompletion(report, controllerTester), controllerTester.containerTester().serviceRegistry().artifactRepositoryMock())
.application(app)
.projectId(projectId);
job.type(JobType.component).uploadArtifact(applicationPackage).submit();
controllerTester.deploy(app.id().defaultInstance(), applicationPackage, TEST_ZONE);
job.type(JobType.systemTest).submit();
Request request = request("/application/v4/tenant/tenant1/application/application1/jobreport", POST)
.data(asJson(job.type(JobType.systemTest).report()))
.userIdentity(HOSTED_VESPA_OPERATOR)
.get();
tester.assertResponse(request, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Notified of completion " +
"of system-test for tenant1.application1, but that has not been triggered; last was " +
controllerTester.controller().applications().requireInstance(app.id().defaultInstance()).deploymentJobs().jobStatus().get(JobType.systemTest).lastTriggered().get().at() + "\"}", 400);
request = request("/application/v4/tenant/tenant1/application/application1/jobreport", POST)
.data(asJson(job.type(JobType.productionUsEast3).report()))
.userIdentity(HOSTED_VESPA_OPERATOR)
.get();
tester.assertResponse(request, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Notified of completion " +
"of production-us-east-3 for tenant1.application1, but that has not been triggered; last was never\"}",
400);
JobStatus recordedStatus =
tester.controller().applications().getInstance(app.id().defaultInstance()).get().deploymentJobs().jobStatus().get(JobType.component);
assertNotNull("Status was recorded", recordedStatus);
assertTrue(recordedStatus.isSuccess());
assertEquals(vespaVersion, recordedStatus.lastCompleted().get().platform());
recordedStatus =
tester.controller().applications().getInstance(app.id().defaultInstance()).get().deploymentJobs().jobStatus().get(JobType.productionApNortheast2);
assertNull("Status of never-triggered jobs is empty", recordedStatus);
assertTrue("All jobs have been run", tester.controller().applications().deploymentTrigger().jobsToRun().isEmpty());
}
@Test
public void testJobStatusReportingOutOfCapacity() {
controllerTester.containerTester().computeVersionStatus();
long projectId = 1;
Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.build();
BuildJob job = new BuildJob(report -> notifyCompletion(report, controllerTester), controllerTester.containerTester().serviceRegistry().artifactRepositoryMock())
.application(app)
.projectId(projectId);
job.type(JobType.component).uploadArtifact(applicationPackage).submit();
controllerTester.deploy(app.id().defaultInstance(), applicationPackage, TEST_ZONE);
job.type(JobType.systemTest).submit();
controllerTester.deploy(app.id().defaultInstance(), applicationPackage, STAGING_ZONE);
job.type(JobType.stagingTest).error(DeploymentJobs.JobError.outOfCapacity).submit();
JobStatus jobStatus = tester.controller().applications().getInstance(app.id().defaultInstance()).get()
.deploymentJobs()
.jobStatus()
.get(JobType.stagingTest);
assertFalse(jobStatus.isSuccess());
assertEquals(DeploymentJobs.JobError.outOfCapacity, jobStatus.jobError().get());
}
@Test
public void applicationWithRoutingPolicy() {
Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build();
controllerTester.deployCompletely(app, applicationPackage, 1, false);
RoutingPolicy policy = new RoutingPolicy(app.id().defaultInstance(),
ClusterSpec.Id.from("default"),
ZoneId.from(Environment.prod, RegionName.from("us-west-1")),
HostName.from("lb-0-canonical-name"),
Optional.of("dns-zone-1"), Set.of(EndpointId.of("c0")));
tester.controller().curator().writeRoutingPolicies(app.id().defaultInstance(), Set.of(policy));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
.userIdentity(USER_ID),
new File("application-with-routing-policy.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default", GET)
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
}
private void notifyCompletion(DeploymentJobs.JobReport report, ContainerControllerTester tester) {
assertResponse(request("/application/v4/tenant/tenant1/application/application1/jobreport", POST)
.userIdentity(HOSTED_VESPA_OPERATOR)
.data(asJson(report))
.get(),
200, "{\"message\":\"ok\"}");
tester.controller().applications().deploymentTrigger().triggerReadyJobs();
}
private static byte[] asJson(DeploymentJobs.JobReport report) {
Slime slime = new Slime();
Cursor cursor = slime.setObject();
cursor.setLong("projectId", report.projectId());
cursor.setString("jobName", report.jobType().jobName());
cursor.setLong("buildNumber", report.buildNumber());
report.jobError().ifPresent(jobError -> cursor.setString("jobError", jobError.name()));
report.version().flatMap(ApplicationVersion::source).ifPresent(sr -> {
Cursor sourceRevision = cursor.setObject("sourceRevision");
sourceRevision.setString("repository", sr.repository());
sourceRevision.setString("branch", sr.branch());
sourceRevision.setString("commit", sr.commit());
});
cursor.setString("tenant", report.applicationId().tenant().value());
cursor.setString("application", report.applicationId().application().value());
cursor.setString("instance", report.applicationId().instance().value());
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
private MultiPartStreamer createApplicationDeployData(ApplicationPackage applicationPackage, boolean deployDirectly) {
return createApplicationDeployData(Optional.of(applicationPackage), deployDirectly);
}
private MultiPartStreamer createApplicationDeployData(Optional<ApplicationPackage> applicationPackage, boolean deployDirectly) {
return createApplicationDeployData(applicationPackage, Optional.empty(), deployDirectly);
}
private MultiPartStreamer createApplicationDeployData(Optional<ApplicationPackage> applicationPackage,
Optional<ApplicationVersion> applicationVersion, boolean deployDirectly) {
MultiPartStreamer streamer = new MultiPartStreamer();
streamer.addJson("deployOptions", deployOptions(deployDirectly, applicationVersion));
applicationPackage.ifPresent(ap -> streamer.addBytes("applicationZip", ap.zippedContent()));
return streamer;
}
private MultiPartStreamer createApplicationSubmissionData(ApplicationPackage applicationPackage) {
return new MultiPartStreamer().addJson(EnvironmentResource.SUBMIT_OPTIONS, "{\"repository\":\"repo\",\"branch\":\"master\",\"commit\":\"d00d\",\"authorEmail\":\"a@b\"}")
.addBytes(EnvironmentResource.APPLICATION_ZIP, applicationPackage.zippedContent())
.addBytes(EnvironmentResource.APPLICATION_TEST_ZIP, "content".getBytes());
}
private String deployOptions(boolean deployDirectly, Optional<ApplicationVersion> applicationVersion) {
return "{\"vespaVersion\":null," +
"\"ignoreValidationErrors\":false," +
"\"deployDirectly\":" + deployDirectly +
applicationVersion.map(version ->
"," +
"\"buildNumber\":" + version.buildNumber().getAsLong() + "," +
"\"sourceRevision\":{" +
"\"repository\":\"" + version.source().get().repository() + "\"," +
"\"branch\":\"" + version.source().get().branch() + "\"," +
"\"commit\":\"" + version.source().get().commit() + "\"" +
"}"
).orElse("") +
"}";
}
/** Make a request with (athens) user domain1.mytenant */
private RequestBuilder request(String path, Request.Method method) {
return new RequestBuilder(path, method);
}
/**
* In production this happens outside hosted Vespa, so there is no API for it and we need to reach down into the
* mock setup to replicate the action.
*/
private void createAthenzDomainWithAdmin(AthenzDomain domain, UserId userId) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzDbMock.Domain domainMock = mock.getSetup().getOrCreateDomain(domain);
domainMock.markAsVespaTenant();
domainMock.admin(AthenzUser.fromUserId(userId.id()));
}
/**
* Mock athenz service identity configuration. Simulates that configserver is allowed to launch a service
*/
private void configureAthenzIdentity(com.yahoo.vespa.athenz.api.AthenzService service, boolean allowLaunch) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzDbMock.Domain domainMock = mock.getSetup().domains.computeIfAbsent(service.getDomain(), AthenzDbMock.Domain::new);
domainMock.services.put(service.getName(), new AthenzDbMock.Service(allowLaunch));
}
/**
* In production this happens outside hosted Vespa, so there is no API for it and we need to reach down into the
* mock setup to replicate the action.
*/
private void addScrewdriverUserToDeployRole(ScrewdriverId screwdriverId,
AthenzDomain domain,
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId applicationId) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) container.components()
.getComponent(AthenzClientFactoryMock.class.getName());
AthenzIdentity screwdriverIdentity = HostedAthenzIdentities.from(screwdriverId);
AthenzDbMock.Application athenzApplication = mock.getSetup().domains.get(domain).applications.get(applicationId);
athenzApplication.addRoleMember(ApplicationAction.deploy, screwdriverIdentity);
}
private ApplicationId createTenantAndApplication() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
.userIdentity(USER_ID)
.data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
.oktaAccessToken(OKTA_AT),
new File("tenant-without-applications.json"));
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", POST)
.userIdentity(USER_ID)
.oktaAccessToken(OKTA_AT),
new File("application-reference.json"));
addScrewdriverUserToDeployRole(SCREWDRIVER_ID, ATHENZ_TENANT_DOMAIN,
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId("application1"));
return ApplicationId.from("tenant1", "application1", "instance1");
}
private void startAndTestChange(ContainerControllerTester controllerTester, ApplicationId application,
long projectId, ApplicationPackage applicationPackage,
MultiPartStreamer deployData, long buildNumber) {
ContainerTester tester = controllerTester.containerTester();
controllerTester.containerTester().serviceRegistry().artifactRepositoryMock()
.put(application, applicationPackage, "1.0." + buildNumber + "-commit1");
controllerTester.jobCompletion(JobType.component)
.application(application)
.projectId(projectId)
.buildNumber(buildNumber)
.submit();
String testPath = String.format("/application/v4/tenant/%s/application/%s/instance/%s/environment/test/region/us-east-1",
application.tenant().value(), application.application().value(), application.instance().value());
tester.assertResponse(request(testPath, POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
tester.assertResponse(request(testPath, DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated " + application + " in test.us-east-1\"}");
controllerTester.jobCompletion(JobType.systemTest)
.application(application)
.projectId(projectId)
.submit();
String stagingPath = String.format("/application/v4/tenant/%s/application/%s/instance/%s/environment/staging/region/us-east-3",
application.tenant().value(), application.application().value(), application.instance().value());
tester.assertResponse(request(stagingPath, POST)
.data(deployData)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("deploy-result.json"));
tester.assertResponse(request(stagingPath, DELETE)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Deactivated " + application + " in staging.us-east-3\"}");
controllerTester.jobCompletion(JobType.stagingTest)
.application(application)
.projectId(projectId)
.submit();
}
/**
* Cluster info, utilization and application and deployment metrics are maintained async by maintainers.
*
* This sets these values as if the maintainers have run.
*/
private void setDeploymentMaintainedInfo(ContainerControllerTester controllerTester) {
for (Application application : controllerTester.controller().applications().asList()) {
controllerTester.controller().applications().lockApplicationOrThrow(application.id(), lockedApplication -> {
lockedApplication = lockedApplication.with(new ApplicationMetrics(0.5, 0.7));
for (Instance instance : application.instances().values()) {
for (Deployment deployment : instance.deployments().values()) {
Map<ClusterSpec.Id, ClusterInfo> clusterInfo = new HashMap<>();
List<String> hostnames = new ArrayList<>();
hostnames.add("host1");
hostnames.add("host2");
clusterInfo.put(ClusterSpec.Id.from("cluster1"),
new ClusterInfo("flavor1", 37, 2, 4, 50,
ClusterSpec.Type.content, hostnames));
Map<ClusterSpec.Id, ClusterUtilization> clusterUtils = new HashMap<>();
clusterUtils.put(ClusterSpec.Id.from("cluster1"), new ClusterUtilization(0.3, 0.6, 0.4, 0.3));
DeploymentMetrics metrics = new DeploymentMetrics(1, 2, 3, 4, 5,
Optional.of(Instant.ofEpochMilli(123123)), Map.of());
lockedApplication = lockedApplication.with(instance.name(),
lockedInstance -> lockedInstance.withClusterInfo(deployment.zone(), clusterInfo)
.withClusterUtilization(deployment.zone(), clusterUtils)
.with(deployment.zone(), metrics)
.recordActivityAt(Instant.parse("2018-06-01T10:15:30.00Z"), deployment.zone()));
}
controllerTester.controller().applications().store(lockedApplication);
}
});
}
}
private ServiceRegistryMock serviceRegistry() {
return (ServiceRegistryMock) tester.container().components().getComponent(ServiceRegistryMock.class.getName());
}
private void setZoneInRotation(String rotationName, ZoneId zone) {
serviceRegistry().globalRoutingServiceMock().setStatus(rotationName, zone, com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus.IN);
new RotationStatusUpdater(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator())).run();
}
private RotationStatus rotationStatus(Instance instance) {
return controllerTester.controller().applications().rotationRepository().getRotation(instance)
.map(rotation -> {
var rotationStatus = controllerTester.controller().serviceRegistry().globalRoutingService().getHealthStatus(rotation.name());
var statusMap = new LinkedHashMap<ZoneId, RotationState>();
rotationStatus.forEach((zone, status) -> statusMap.put(zone, RotationState.in));
return RotationStatus.from(Map.of(rotation.id(), statusMap));
})
.orElse(RotationStatus.EMPTY);
}
private void updateContactInformation() {
Contact contact = new Contact(URI.create("www.contacts.tld/1234"),
URI.create("www.properties.tld/1234"),
URI.create("www.issues.tld/1234"),
List.of(List.of("alice"), List.of("bob")), "queue", Optional.empty());
tester.controller().tenants().lockIfPresent(TenantName.from("tenant2"),
LockedTenant.Athenz.class,
lockedTenant -> tester.controller().tenants().store(lockedTenant.with(contact)));
}
private void registerContact(long propertyId) {
PropertyId p = new PropertyId(String.valueOf(propertyId));
serviceRegistry().contactRetrieverMock().addContact(p, new Contact(URI.create("www.issues.tld/" + p.id()),
URI.create("www.contacts.tld/" + p.id()),
URI.create("www.properties.tld/" + p.id()),
List.of(Collections.singletonList("alice"),
Collections.singletonList("bob")),
"queue", Optional.empty()));
}
private static class RequestBuilder implements Supplier<Request> {
private final String path;
private final Request.Method method;
private byte[] data = new byte[0];
private AthenzIdentity identity;
private OktaAccessToken oktaAccessToken;
private String contentType = "application/json";
private Map<String, List<String>> headers = new HashMap<>();
private String recursive;
private RequestBuilder(String path, Request.Method method) {
this.path = path;
this.method = method;
}
private RequestBuilder data(byte[] data) { this.data = data; return this; }
private RequestBuilder data(String data) { return data(data.getBytes(StandardCharsets.UTF_8)); }
private RequestBuilder data(MultiPartStreamer streamer) {
return Exceptions.uncheck(() -> data(streamer.data().readAllBytes()).contentType(streamer.contentType()));
}
private RequestBuilder userIdentity(UserId userId) { this.identity = HostedAthenzIdentities.from(userId); return this; }
private RequestBuilder screwdriverIdentity(ScrewdriverId screwdriverId) { this.identity = HostedAthenzIdentities.from(screwdriverId); return this; }
private RequestBuilder oktaAccessToken(OktaAccessToken oktaAccessToken) { this.oktaAccessToken = oktaAccessToken; return this; }
private RequestBuilder contentType(String contentType) { this.contentType = contentType; return this; }
private RequestBuilder recursive(String recursive) { this.recursive = recursive; return this; }
private RequestBuilder header(String name, String value) {
this.headers.putIfAbsent(name, new ArrayList<>());
this.headers.get(name).add(value);
return this;
}
@Override
public Request get() {
Request request = new Request("http:
(recursive == null ? "" : "?recursive=" + recursive),
data, method);
request.getHeaders().addAll(headers);
request.getHeaders().put("Content-Type", contentType);
if (identity != null) {
addIdentityToRequest(request, identity);
}
if (oktaAccessToken != null) {
addOktaAccessToken(request, oktaAccessToken);
}
return request;
}
}
}
|
What do you think about testing the error message for containing some string specified in the test? That would depend less on the internals of the `FlinkCompletableFutureAssert`.
```suggestion
failedFuture.completeExceptionally(new IllegalStateException("abcd"));
assertThatThrownBy(() -> assertThatFuture(failedFuture).eventuallySucceeds())
        .hasMessageContaining("abcd");
```
(ditto: `"to have failed"`)
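For reference, a minimal self-contained sketch of this decoupling idea, using plain JUnit 5 and AssertJ rather than the Flink `assertThatFuture` helper; the test class name and the marker string `"abcd"` are illustrative assumptions, not part of the change under review:
```java
import static org.assertj.core.api.Assertions.assertThatThrownBy;

import java.util.concurrent.CompletableFuture;
import org.junit.jupiter.api.Test;

class MessageMarkerExampleTest {

    @Test
    void failureMessageContainsMarkerChosenByTheTest() {
        CompletableFuture<String> failedFuture = new CompletableFuture<>();
        // The marker "abcd" is picked by the test itself, so the assertion does
        // not depend on the wording produced internally by any assertion helper.
        failedFuture.completeExceptionally(new IllegalStateException("abcd"));

        // join() wraps the cause in a CompletionException whose message includes
        // the original exception's message, so the marker is still visible.
        assertThatThrownBy(failedFuture::join).hasMessageContaining("abcd");
    }
}
```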
|
void testEventuallySucceedsWithFailedFuture() {
final CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new IllegalStateException());
assertThatThrownBy(() -> assertThatFuture(failedFuture).eventuallySucceeds())
.hasMessageContaining("to have succeeded");
}
|
.hasMessageContaining("to have succeeded");
|
void testEventuallySucceedsWithFailedFuture() {
final CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new IllegalStateException("Squirrel"));
assertThatThrownBy(() -> assertThatFuture(failedFuture).eventuallySucceeds())
.isInstanceOf(AssertionError.class)
.hasMessageContaining("Squirrel");
}
|
class FlinkAssertionsTest {
@Test
void testEventuallySucceedsWithCompletedFuture() {
final CompletableFuture<String> completedFuture =
CompletableFuture.completedFuture("Apache Flink");
assertThatFuture(completedFuture).eventuallySucceeds().isEqualTo("Apache Flink");
}
@Test
void testEventuallyFailsWithCompletedFuture() {
final CompletableFuture<String> completedFuture =
CompletableFuture.completedFuture("Apache Flink");
assertThatThrownBy(
() ->
assertThatFuture(completedFuture)
.eventuallyFailsWith(IllegalStateException.class))
.hasMessageContaining("to have failed");
}
@Test
void testEventuallyFailsWithFailedFuture() {
final CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new IllegalStateException());
assertThatFuture(failedFuture)
.eventuallyFailsWith(ExecutionException.class)
.withCauseInstanceOf(IllegalStateException.class);
}
@Test
void testEventuallyFailsWithFailedFutureWithDifferentException() {
final CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new IllegalStateException());
assertThatThrownBy(
() ->
assertThatFuture(failedFuture)
.eventuallyFailsWith(CancellationException.class))
.hasMessageContaining("actual throwable to be an instance of");
}
}
|
class FlinkAssertionsTest {
@Test
void testEventuallySucceedsWithCompletedFuture() {
final CompletableFuture<String> completedFuture =
CompletableFuture.completedFuture("Apache Flink");
assertThatFuture(completedFuture).eventuallySucceeds().isEqualTo("Apache Flink");
}
@Test
void testEventuallySucceedsWithIncompleteFutureTimesOut() throws Exception {
assertWaitingForInterrupt(
() ->
assertThatThrownBy(
() ->
assertThatFuture(new CompletableFuture<>())
.eventuallySucceeds())
.isInstanceOf(AssertionError.class),
Duration.ofMillis(10));
}
@Test
void testEventuallyFailsWithCompletedFuture() {
final CompletableFuture<String> completedFuture =
CompletableFuture.completedFuture("Apache Flink");
assertThatThrownBy(
() ->
assertThatFuture(completedFuture)
.eventuallyFailsWith(IllegalStateException.class))
.isInstanceOf(AssertionError.class)
.hasMessageContaining("Apache Flink");
}
@Test
void testEventuallyFailsWithFailedFuture() {
final CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new IllegalStateException());
assertThatFuture(failedFuture)
.eventuallyFailsWith(ExecutionException.class)
.withCauseInstanceOf(IllegalStateException.class);
}
@Test
void testEventuallyFailsWithFailedFutureWithDifferentException() {
final CompletableFuture<String> failedFuture = new CompletableFuture<>();
failedFuture.completeExceptionally(new IllegalStateException());
assertThatThrownBy(
() ->
assertThatFuture(failedFuture)
.eventuallyFailsWith(CancellationException.class))
.isInstanceOf(AssertionError.class)
.hasMessageContaining("CancellationException");
}
@Test
void testEventuallyFailsWithIncompleteFutureTimesOut() throws Exception {
assertWaitingForInterrupt(
() ->
assertThatFuture(new CompletableFuture<>())
.eventuallyFails()
.withThrowableOfType(InterruptedException.class),
Duration.ofMillis(10));
}
private static void assertWaitingForInterrupt(Runnable runnable, Duration timeout)
throws Exception {
final CheckedThread thread =
new CheckedThread() {
@Override
public void go() {
runnable.run();
}
};
thread.start();
Thread.sleep(timeout.toMillis());
assertThat(thread.isAlive()).isTrue();
thread.interrupt();
thread.sync();
}
}
|
Probably something to include for this path as well?
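Concretely, a minimal sketch of the audit logging the comment hints at, mirroring the `request = controller.auditLogger().log(request)` pattern that `deployPlatform` already uses in this handler; the threaded-through `request` parameter matches the updated routing in the row below:

```java
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
    request = controller.auditLogger().log(request); // audit this mutating path as well
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = Change.of(application.get().deploymentJobs()
                                             .statusOf(JobType.component).get()
                                             .lastSuccess().get().application());
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}
```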
|
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
|
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"));
|
private HttpResponse handlePOST(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
|
class ApplicationApiHandler extends LoggingRequestHandler {
private final Controller controller;
private final ZmsClientFacade zmsClient;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AthenzClientFactory athenzClientFactory) {
super(parentCtx);
this.controller = controller;
this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
if (path.matches("/application/v4/athensDomain")) return athenzDomains(request);
if (path.matches("/application/v4/property")) return properties();
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return setMajorVersion(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request, true);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
}
private HttpResponse authenticatedUser(HttpRequest request) {
String userIdString = request.getProperty("userOverride");
if (userIdString == null)
userIdString = getUserId(request)
.map(UserId::id)
.orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
UserId userId = new UserId(userIdString);
List<Tenant> tenants = controller.tenants().asList(userId);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", userId.id());
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant &&
((UserTenant) tenant).is(userId.id())));
return new SlimeJsonResponse(slime);
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
/** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor pipelinesArray = response.setArray("tenantPipelines");
for (Application application : controller.applications().asList()) {
if ( ! application.deploymentJobs().projectId().isPresent()) continue;
Cursor pipelineObject = pipelinesArray.addObject();
pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
pipelineObject.setString("tenant", application.id().tenant().value());
pipelineObject.setString("application", application.id().application().value());
pipelineObject.setString("instance", application.id().instance().value());
}
response.setArray("brokenTenantPipelines");
return new SlimeJsonResponse(slime);
}
private HttpResponse athenzDomains(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor array = response.setArray("data");
for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
array.addString(athenzDomain.getName());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse properties() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor array = response.setArray("properties");
for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
Cursor propertyObject = array.addObject();
propertyObject.setString("propertyid", entry.getKey().id());
propertyObject.setString("property", entry.getValue().id());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().tenant(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request, true))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request, listApplications);
return new SlimeJsonResponse(slime);
}
private HttpResponse applications(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Slime slime = new Slime();
Cursor array = slime.setArray();
for (Application application : controller.applications().asList(tenant))
toSlime(application, array.addObject(), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
Application application = getApplication(tenantName, applicationName);
Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
if ( ! majorVersionField.valid())
throw new IllegalArgumentException("Request body must contain a majorVersion field");
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
controller.applications().lockIfPresent(application.id(),
a -> controller.applications().store(a.withMajorVersion(majorVersion)));
return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
private Application getApplication(String tenantName, String applicationName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
return controller.applications().get(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
if (queryParameters.containsKey("streaming")) {
InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
logStream.transferTo(outputStream);
}
};
}
Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
Slime slime = new Slime();
Cursor object = slime.setObject();
if (response.isPresent()) {
response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue()));
}
return new SlimeJsonResponse(slime);
}
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
String triggered = controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
.stream().map(JobType::jobName).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor globalRotationsArray = object.setArray("globalRotations");
application.globalDnsName(controller.system()).ifPresent(rotation -> {
globalRotationsArray.addString(rotation.url().toString());
globalRotationsArray.addString(rotation.secureUrl().toString());
globalRotationsArray.addString(rotation.oathUrl().toString());
object.setString("rotationId", application.rotation().get().asString());
});
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
for (RotationName rotation : policy.rotations()) {
GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
globalRotationsArray.addString(dnsName.oathUrl().toString());
}
}
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Application application = controller.applications().get(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(application.id(),
ZoneId.from(environment, region));
Deployment deployment = application.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.application()
.filter(version -> !version.isUnknown())
.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
.ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
controller.zoneRegistry().getLogServerUri(deploymentId)
.ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentCost appCost = deployment.calculateCost();
Cursor costObject = response.setObject("cost");
toSlime(appCost, costObject);
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
if (!applicationVersion.isUnknown()) {
object.setString("hash", applicationVersion.id());
sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
}
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
if ( ! revision.isPresent()) return;
object.setString("gitRepository", revision.get().repository());
object.setString("gitBranch", revision.get().branch());
object.setString("gitCommit", revision.get().commit());
}
private void toSlime(RotationStatus status, Cursor object) {
Cursor bcpStatus = object.setObject("bcpStatus");
bcpStatus.setString("rotationStatus", status.name().toUpperCase());
}
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = ZoneId.from(environment, region);
Deployment deployment = application.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(application + " has no deployment in " + zone);
}
Inspector requestData = toSlime(request.getData()).get();
String reason = mandatory("reason", requestData).asString();
String agent = getUserPrincipal(request).getIdentity().getFullName();
long timestamp = controller.clock().instant().getEpochSecond();
EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
endpointStatus);
return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
application.id().toShortString(),
deployment.zone().environment().value(),
deployment.zone().region().value(),
inService ? "in" : "out of"));
}
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
Slime slime = new Slime();
Cursor array = slime.setObject().setArray("globalrotationoverride");
Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
for (RoutingEndpoint endpoint : status.keySet()) {
EndpointStatus currentStatus = status.get(endpoint);
array.addString(endpoint.upstreamName());
Cursor statusObject = array.addObject();
statusObject.setString("status", currentStatus.getStatus().name());
statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
statusObject.setLong("timestamp", currentStatus.getEpoch());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
Application application = controller.applications().require(applicationId);
ZoneId zone = ZoneId.from(environment, region);
if (!application.rotation().isPresent()) {
throw new NotExistsException("global rotation does not exist for " + application);
}
Deployment deployment = application.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(application + " has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
toSlime(application.rotationStatus(deployment), response);
return new SlimeJsonResponse(slime);
}
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
Slime slime = new Slime();
Cursor root = slime.setObject();
if (!app.change().isEmpty()) {
app.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
root.setBool("pinned", app.change().isPinned());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
boolean suspended = controller.applications().isSuspended(deploymentId);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setBool("suspended", suspended);
return new SlimeJsonResponse(slime);
}
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
request.getUri());
response.setResponse(applicationView);
return response;
}
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
request.getUri());
response.setResponse(result, serviceName, restPath);
return response;
}
private HttpResponse createUser(HttpRequest request) {
Optional<UserId> user = getUserId(request);
if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user.");
String username = UserTenant.normalizeUser(user.get().id());
try {
controller.tenants().create(UserTenant.create(username));
return new MessageResponse("Created user '" + username + "'");
} catch (AlreadyExistsException e) {
return new MessageResponse("User '" + username + "' already exists");
}
}
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName));
if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Inspector requestData = toSlime(request.getData()).get();
OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName);
controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> {
lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString()));
lockedTenant = controller.tenants().withDomain(
lockedTenant,
new AthenzDomain(mandatory("athensDomain", requestData).asString()),
token
);
Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new);
if (propertyId.isPresent()) {
lockedTenant = lockedTenant.with(propertyId.get());
}
controller.tenants().store(lockedTenant);
});
return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true);
}
private HttpResponse createTenant(String tenantName, HttpRequest request) {
Inspector requestData = toSlime(request.getData()).get();
AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName),
new AthenzDomain(mandatory("athensDomain", requestData).asString()),
new Property(mandatory("property", requestData).asString()),
optional("propertyId", requestData).map(PropertyId::new));
throwIfNotAthenzDomainAdmin(tenant.domain(), request);
controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName));
return tenant(tenant, request, true);
}
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
Application application;
try {
application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request));
}
catch (ZmsClientException e) {
if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
throw new ForbiddenException("Not authorized to create application", e);
else
throw e;
}
Slime slime = new Slime();
toSlime(application, slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Version version = Version.fromString(versionString);
if (version.equals(Version.emptyVersion))
version = controller.systemVersion();
if ( ! systemHasVersion(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + controller.versionStatus().versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
if (pin)
change = change.withPin();
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered " + change + " for " + id);
});
return new MessageResponse(response.toString());
}
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application());
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered " + change + " for " + id);
});
return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Change change = application.get().change();
if (change.isEmpty()) {
response.append("No deployment in progress for " + application + " at this time");
return;
}
ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
controller.applications().deploymentTrigger().cancelChange(id, cancel);
response.append("Changed deployment from '" + change + "' to '" +
controller.applications().require(id).change() + "' for " + application);
});
return new MessageResponse(response.toString());
}
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
controller.applications().restart(deploymentId, hostname);
return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
ApplicationResource.API_PATH, applicationName,
EnvironmentResource.API_PATH, environment,
"region", region,
"instance", instanceName));
}
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* Deploy direct is when we want to redeploy the current application - retrieve version
* info from the application package before deploying
*/
if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if(!deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
ApplicationVersion version = deployment.get().applicationVersion();
if(version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(getUserPrincipal(request).getIdentity()));
return new SlimeJsonResponse(toSlime(result));
}
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
Optional<Tenant> tenant = controller.tenants().tenant(tenantName);
if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
if (tenant.get() instanceof AthenzTenant) {
controller.tenants().deleteTenant((AthenzTenant) tenant.get(),
requireOktaAccessToken(request, "Could not delete " + tenantName));
} else if (tenant.get() instanceof UserTenant) {
controller.tenants().deleteTenant((UserTenant) tenant.get());
} else {
throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() +
", for " + tenant.get());
}
return tenant(tenant.get(), request, false);
}
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
controller.applications().deleteApplication(id, getOktaAccessToken(request));
return new EmptyJsonResponse();
}
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
ApplicationResource.API_PATH, applicationName,
EnvironmentResource.API_PATH, environment,
"region", region,
"instance", instanceName));
}
/**
* Promote application Chef environments. To be used by component jobs only
*/
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
try{
ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
String sourceEnvironment = chefEnvironment.systemChefEnvironment();
String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
} catch (Exception e) {
log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
}
}
/**
* Promote application Chef environments for jobs that deploy applications
*/
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
try {
ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
} catch (Exception e) {
log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
}
}
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
try {
DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
if ( report.jobType() == JobType.component
&& controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed internally, and no " +
"longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
"to the old pipeline, please file a ticket at yo/vespa-support and request this.");
controller.applications().deploymentTrigger().notifyOfCompletion(report);
return new MessageResponse("ok");
} catch (IllegalStateException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
}
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
Optional<DeploymentJobs.JobError> jobError = Optional.empty();
if (report.field("jobError").valid()) {
jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()));
}
ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
JobType type = JobType.fromJobName(report.field("jobName").asString());
long buildNumber = report.field("buildNumber").asLong();
if (type == JobType.component)
return DeploymentJobs.JobReport.ofComponent(id,
report.field("projectId").asLong(),
buildNumber,
jobError,
toSourceRevision(report.field("sourceRevision")));
else
return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
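// Illustrative shape of the job-report payload parsed above (field names taken
// from the parsing code; the values are invented for the example):
//   { "instance": "default", "jobName": "component", "buildNumber": 42,
//     "projectId": 123, "jobError": "<a DeploymentJobs.JobError name, optional>",
//     "sourceRevision": { "repository": "repo", "branch": "master", "commit": "deadbeef" } }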
private static SourceRevision toSourceRevision(Inspector object) {
if (!object.field("repository").valid() ||
!object.field("branch").valid() ||
!object.field("commit").valid()) {
throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
}
return new SourceRevision(object.field("repository").asString(),
object.field("branch").asString(),
object.field("commit").asString());
}
private Tenant getTenantOrThrow(String tenantName) {
return controller.tenants().tenant(tenantName)
.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) {
object.setString("tenant", tenant.name().value());
object.setString("type", tentantType(tenant));
if (tenant instanceof AthenzTenant) {
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
}
Cursor applicationArray = object.setArray("applications");
if (listApplications) {
for (Application application : controller.applications().asList(tenant.name())) {
if (application.id().instance().isDefault()) {
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), application, request);
else
toSlime(application, applicationArray.addObject(), request);
}
}
}
if (tenant instanceof AthenzTenant) {
AthenzTenant athenzTenant = (AthenzTenant) tenant;
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
}
}
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tentantType(tenant));
if (tenant instanceof AthenzTenant) {
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
}
catch (URISyntaxException e) {
throw new RuntimeException("Will not happen", e);
}
}
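// Illustrative call (hypothetical URI): withPath("/application/v4/tenant/foo",
// URI.create("https://api.example.test:4443/old/path?q=1")) yields
// https://api.example.test:4443/application/v4/tenant/foo; the query and
// fragment are dropped because the constructor above is passed null for both.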
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
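// For example, asLong(null, 7) returns the default 7, asLong("42", 7) returns 42,
// and asLong("x", 7) throws IllegalArgumentException.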
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
object.setLong("id", jobRun.id());
object.setString("version", jobRun.platform().toFullString());
if (!jobRun.application().isUnknown())
toSlime(jobRun.application(), object.setObject("revision"));
object.setString("reason", jobRun.reason());
object.setLong("at", jobRun.at().toEpochMilli());
}
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) {
AthenzIdentity identity = getUserPrincipal(request).getIdentity();
boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain);
if ( ! isDomainAdmin) {
throw new ForbiddenException(
String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName()));
}
}
private static Optional<UserId> getUserId(HttpRequest request) {
return Optional.of(getUserPrincipal(request))
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.map(AthenzUser.class::cast)
.map(AthenzUser::getName)
.map(UserId::new);
}
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
if (!(principal instanceof AthenzPrincipal))
throw new InternalServerErrorException(
String.format("Expected principal of type %s, got %s",
AthenzPrincipal.class.getSimpleName(), principal.getClass().getName()));
return (AthenzPrincipal) principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
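// e.g. path("tenant", "foo", "application", "bar") returns "tenant/foo/application/bar";
// the *Resource.API_PATH constants used by callers are assumed to hold such
// plain path segments.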
private void toSlime(Application application, Cursor object, HttpRequest request) {
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(), request.getUri()).toString());
}
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private String readToString(InputStream stream) {
try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
return scanner.hasNext() ? scanner.next() : null;
}
}
private boolean systemHasVersion(Version version) {
return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
object.setLong("tco", (long)deploymentCost.getTco());
object.setLong("waste", (long)deploymentCost.getWaste());
object.setDouble("utilization", deploymentCost.getUtilization());
Cursor clustersObject = object.setObject("cluster");
for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}
private static void toSlime(ClusterCost clusterCost, Cursor object) {
object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
object.setLong("tco", (int)clusterCost.getTco());
object.setLong("waste", (int)clusterCost.getWaste());
object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
Cursor utilObject = object.setObject("util");
utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
Cursor usageObject = object.setObject("usage");
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
Cursor hostnamesArray = object.setArray("hostnames");
for (String hostname : clusterCost.getClusterInfo().getHostnames())
hostnamesArray.addString(hostname);
}
private static String getResourceName(ClusterUtilization utilization) {
String name = "cpu";
double max = utilization.getMaxUtilization();
if (utilization.getMemory() == max) {
name = "mem";
} else if (utilization.getDisk() == max) {
name = "disk";
} else if (utilization.getDiskBusy() == max) {
name = "diskbusy";
}
return name;
}
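// For example, utilization cpu=0.2, mem=0.9, disk=0.4, diskBusy=0.1 (max 0.9)
// yields "mem"; equal maxima resolve in the order checked above, with "cpu"
// as the default when no other dimension matches the maximum.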
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
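// The "recursive" query parameter thus forms a hierarchy: recursing over tenants
// implies recursing over applications, which implies recursing over deployments,
// and "all"/"true" select the deepest level. For example,
// GET /application/v4/?recursive=tenant expands every tenant with its
// default-instance applications in a single response.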
private static String tenantType(Tenant tenant) {
if (tenant instanceof AthenzTenant) {
return "ATHENS";
} else if (tenant instanceof UserTenant) {
return "USER";
}
throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) {
return getOktaAccessToken(request)
.orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided"));
}
private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) {
return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token"))
.map(attribute -> new OktaAccessToken((String) attribute));
}
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(getUserPrincipal(request).getIdentity()));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
}
|
class ApplicationApiHandler extends LoggingRequestHandler {
private final Controller controller;
private final ZmsClientFacade zmsClient;
@Inject
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AthenzClientFactory athenzClientFactory) {
super(parentCtx);
this.controller = controller;
this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity());
}
@Override
public Duration getTimeout() {
return Duration.ofMinutes(20);
}
@Override
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
case GET: return handleGET(request);
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case PATCH: return handlePATCH(request);
case DELETE: return handleDELETE(request);
case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
catch (ForbiddenException e) {
return ErrorResponse.forbidden(Exceptions.toMessageString(e));
}
catch (NotAuthorizedException e) {
return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
}
catch (NotExistsException e) {
return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
}
catch (IllegalArgumentException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
return ErrorResponse.from(e);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
private HttpResponse handleGET(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/user")) return authenticatedUser(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
if (path.matches("/application/v4/athensDomain")) return athenzDomains(request);
if (path.matches("/application/v4/property")) return properties();
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePUT(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/user")) return createUser(request);
if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handlePATCH(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}"))
return setMajorVersion(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleDELETE(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
private HttpResponse handleOPTIONS() {
EmptyJsonResponse response = new EmptyJsonResponse();
response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
return response;
}
private HttpResponse recursiveRoot(HttpRequest request) {
Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
toSlime(tenantArray.addObject(), tenant, request, true);
return new SlimeJsonResponse(slime);
}
private HttpResponse root(HttpRequest request) {
return recurseOverTenants(request)
? recursiveRoot(request)
: new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
}
private HttpResponse authenticatedUser(HttpRequest request) {
String userIdString = request.getProperty("userOverride");
if (userIdString == null)
userIdString = getUserId(request)
.map(UserId::id)
.orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
UserId userId = new UserId(userIdString);
List<Tenant> tenants = controller.tenants().asList(userId);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setString("user", userId.id());
Cursor tenantsArray = response.setArray("tenants");
for (Tenant tenant : tenants)
tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant &&
((UserTenant) tenant).is(userId.id())));
return new SlimeJsonResponse(slime);
}
private HttpResponse tenants(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setArray();
for (Tenant tenant : controller.tenants().asList())
tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
return new SlimeJsonResponse(slime);
}
/** Lists the screwdriver project id for each application */
private HttpResponse tenantPipelines() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor pipelinesArray = response.setArray("tenantPipelines");
for (Application application : controller.applications().asList()) {
if ( ! application.deploymentJobs().projectId().isPresent()) continue;
Cursor pipelineObject = pipelinesArray.addObject();
pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
pipelineObject.setString("tenant", application.id().tenant().value());
pipelineObject.setString("application", application.id().application().value());
pipelineObject.setString("instance", application.id().instance().value());
}
response.setArray("brokenTenantPipelines");
return new SlimeJsonResponse(slime);
}
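// Illustrative response shape (values invented for the example):
//   { "tenantPipelines": [ { "screwdriverId": "123", "tenant": "foo",
//                            "application": "bar", "instance": "default" } ],
//     "brokenTenantPipelines": [] }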
private HttpResponse athenzDomains(HttpRequest request) {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor array = response.setArray("data");
for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
array.addString(athenzDomain.getName());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse properties() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor array = response.setArray("properties");
for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
Cursor propertyObject = array.addObject();
propertyObject.setString("propertyid", entry.getKey().id());
propertyObject.setString("property", entry.getValue().id());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse tenant(String tenantName, HttpRequest request) {
return controller.tenants().tenant(TenantName.from(tenantName))
.map(tenant -> tenant(tenant, request, true))
.orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
}
private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
Slime slime = new Slime();
toSlime(slime.setObject(), tenant, request, listApplications);
return new SlimeJsonResponse(slime);
}
private HttpResponse applications(String tenantName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
Slime slime = new Slime();
Cursor array = slime.setArray();
for (Application application : controller.applications().asList(tenant))
toSlime(application, array.addObject(), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
return new SlimeJsonResponse(slime);
}
private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
Application application = getApplication(tenantName, applicationName);
Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
if ( ! majorVersionField.valid())
throw new IllegalArgumentException("Request body must contain a majorVersion field");
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
controller.applications().lockIfPresent(application.id(),
a -> controller.applications().store(a.withMajorVersion(majorVersion)));
return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
}
private Application getApplication(String tenantName, String applicationName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
return controller.applications().get(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
DeploymentId deployment = new DeploymentId(application, zone);
if (queryParameters.containsKey("streaming")) {
InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
logStream.transferTo(outputStream);
}
};
}
Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
Slime slime = new Slime();
Cursor object = slime.setObject();
if (response.isPresent()) {
response.get().logs().forEach(object::setString);
}
return new SlimeJsonResponse(slime);
}
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
String triggered = controller.applications().deploymentTrigger()
.forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
.stream().map(JobType::jobName).collect(joining(", "));
return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
: "Triggered " + triggered + " for " + id);
}
private HttpResponse pause(ApplicationId id, JobType type) {
Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
controller.applications().deploymentTrigger().pauseJob(id, type, until);
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
private void toSlime(Cursor object, Application application, HttpRequest request) {
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("deployments", withPath("/application/v4" +
"/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value() +
"/instance/" + application.id().instance().value() + "/job/",
request.getUri()).toString());
application.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
application.deploymentJobs().projectId()
.ifPresent(id -> object.setLong("projectId", id));
if ( ! application.change().isEmpty()) {
toSlime(object.setObject("deploying"), application.change());
}
if ( ! application.outstandingChange().isEmpty()) {
toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedJobs(application.deploymentJobs().jobStatus().values());
object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().jobName());
jobObject.setBool("success", job.isSuccess());
job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
}
Cursor changeBlockers = object.setArray("changeBlockers");
application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
Cursor days = changeBlockerObject.setArray("days");
changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
Cursor hours = changeBlockerObject.setArray("hours");
changeBlocker.window().hours().forEach(hours::addLong);
});
object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
Cursor globalRotationsArray = object.setArray("globalRotations");
application.globalDnsName(controller.system()).ifPresent(rotation -> {
globalRotationsArray.addString(rotation.url().toString());
globalRotationsArray.addString(rotation.secureUrl().toString());
globalRotationsArray.addString(rotation.oathUrl().toString());
object.setString("rotationId", application.rotation().get().asString());
});
Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
for (RoutingPolicy policy : routingPolicies) {
for (RotationName rotation : policy.rotations()) {
GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
globalRotationsArray.addString(dnsName.oathUrl().toString());
}
}
List<Deployment> deployments = controller.applications().deploymentTrigger()
.steps(application.deploymentSpec())
.sortedDeployments(application.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value());
if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
toSlime(application.rotationStatus(deployment), deploymentObject);
}
if (recurseOverDeployments(request))
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
else
deploymentObject.setString("url", withPath(request.getUri().getPath() +
"/environment/" + deployment.zone().environment().value() +
"/region/" + deployment.zone().region().value() +
"/instance/" + application.id().instance().value(),
request.getUri()).toString());
}
Cursor metricsObject = object.setObject("metrics");
metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
Cursor activity = object.setObject("activity");
application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
Application application = controller.applications().get(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(application.id(),
ZoneId.from(environment, region));
Deployment deployment = application.deployments().get(deploymentId.zoneId());
if (deployment == null)
throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
Slime slime = new Slime();
toSlime(slime.setObject(), deploymentId, deployment, request);
return new SlimeJsonResponse(slime);
}
private void toSlime(Cursor object, Change change) {
change.platform().ifPresent(version -> object.setString("version", version.toString()));
change.application()
.filter(version -> !version.isUnknown())
.ifPresent(version -> toSlime(version, object.setObject("revision")));
}
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
.ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
controller.zoneRegistry().getLogServerUri(deploymentId)
.ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
response.setString("version", deployment.version().toFullString());
response.setString("revision", deployment.applicationVersion().id());
response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
Cursor activity = response.setObject("activity");
deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried",
instant.toEpochMilli()));
deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten",
instant.toEpochMilli()));
deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
DeploymentCost appCost = deployment.calculateCost();
Cursor costObject = response.setObject("cost");
toSlime(appCost, costObject);
DeploymentMetrics metrics = deployment.metrics();
Cursor metricsObject = response.setObject("metrics");
metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
if (!applicationVersion.isUnknown()) {
object.setString("hash", applicationVersion.id());
sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
}
}
private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
if ( ! revision.isPresent()) return;
object.setString("gitRepository", revision.get().repository());
object.setString("gitBranch", revision.get().branch());
object.setString("gitCommit", revision.get().commit());
}
private void toSlime(RotationStatus status, Cursor object) {
Cursor bcpStatus = object.setObject("bcpStatus");
bcpStatus.setString("rotationStatus", status.name().toUpperCase());
}
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = ZoneId.from(environment, region);
Deployment deployment = application.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(application + " has no deployment in " + zone);
}
Inspector requestData = toSlime(request.getData()).get();
String reason = mandatory("reason", requestData).asString();
String agent = getUserPrincipal(request).getIdentity().getFullName();
long timestamp = controller.clock().instant().getEpochSecond();
EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()),
endpointStatus);
return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
application.id().toShortString(),
deployment.zone().environment().value(),
deployment.zone().region().value(),
inService ? "in" : "out of"));
}
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
Slime slime = new Slime();
Cursor array = slime.setObject().setArray("globalrotationoverride");
Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
for (RoutingEndpoint endpoint : status.keySet()) {
EndpointStatus currentStatus = status.get(endpoint);
array.addString(endpoint.upstreamName());
Cursor statusObject = array.addObject();
statusObject.setString("status", currentStatus.getStatus().name());
statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
statusObject.setLong("timestamp", currentStatus.getEpoch());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
Application application = controller.applications().require(applicationId);
ZoneId zone = ZoneId.from(environment, region);
if (!application.rotation().isPresent()) {
throw new NotExistsException("global rotation does not exist for " + application);
}
Deployment deployment = application.deployments().get(zone);
if (deployment == null) {
throw new NotExistsException(application + " has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
toSlime(application.rotationStatus(deployment), response);
return new SlimeJsonResponse(slime);
}
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
Slime slime = new Slime();
Cursor root = slime.setObject();
if (!app.change().isEmpty()) {
app.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
root.setBool("pinned", app.change().isPinned());
}
return new SlimeJsonResponse(slime);
}
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
boolean suspended = controller.applications().isSuspended(deploymentId);
Slime slime = new Slime();
Cursor response = slime.setObject();
response.setBool("suspended", suspended);
return new SlimeJsonResponse(slime);
}
private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
request.getUri());
response.setResponse(applicationView);
return response;
}
private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
request.getUri());
response.setResponse(result, serviceName, restPath);
return response;
}
private HttpResponse createUser(HttpRequest request) {
Optional<UserId> user = getUserId(request);
if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not a user.");
String username = UserTenant.normalizeUser(user.get().id());
try {
controller.tenants().create(UserTenant.create(username));
return new MessageResponse("Created user '" + username + "'");
} catch (AlreadyExistsException e) {
return new MessageResponse("User '" + username + "' already exists");
}
}
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName));
if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
Inspector requestData = toSlime(request.getData()).get();
OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName);
controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> {
lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString()));
lockedTenant = controller.tenants().withDomain(
lockedTenant,
new AthenzDomain(mandatory("athensDomain", requestData).asString()),
token
);
Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new);
if (propertyId.isPresent()) {
lockedTenant = lockedTenant.with(propertyId.get());
}
controller.tenants().store(lockedTenant);
});
return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true);
}
private HttpResponse createTenant(String tenantName, HttpRequest request) {
Inspector requestData = toSlime(request.getData()).get();
AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName),
new AthenzDomain(mandatory("athensDomain", requestData).asString()),
new Property(mandatory("property", requestData).asString()),
optional("propertyId", requestData).map(PropertyId::new));
throwIfNotAthenzDomainAdmin(tenant.domain(), request);
controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName));
return tenant(tenant, request, true);
}
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
Application application;
try {
application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request));
}
catch (ZmsClientException e) {
if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
throw new ForbiddenException("Not authorized to create application", e);
else
throw e;
}
Slime slime = new Slime();
toSlime(application, slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Version version = Version.fromString(versionString);
if (version.equals(Version.emptyVersion))
version = controller.systemVersion();
if ( ! systemHasVersion(version))
throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
"Version is not active in this system. " +
"Active versions: " + controller.versionStatus().versions()
.stream()
.map(VespaVersion::versionNumber)
.map(Version::toString)
.collect(joining(", ")));
Change change = Change.of(version);
if (pin)
change = change.withPin();
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered " + change + " for " + id);
});
return new MessageResponse(response.toString());
}
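// Illustrative request (the route is assumed by analogy with the GET paths above;
// the body is the bare version string, or empty to pick the current system version):
//   POST /application/v4/tenant/foo/application/bar/deploying/platform
//   7.8.9
// Calling the variant with pin=true additionally pins the change to that version.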
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
controller.auditLogger().log(request);
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application());
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered " + change + " for " + id);
});
return new MessageResponse(response.toString());
}
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
StringBuilder response = new StringBuilder();
controller.applications().lockOrThrow(id, application -> {
Change change = application.get().change();
if (change.isEmpty()) {
response.append("No deployment in progress for " + application + " at this time");
return;
}
ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
controller.applications().deploymentTrigger().cancelChange(id, cancel);
response.append("Changed deployment from '" + change + "' to '" +
controller.applications().require(id).change() + "' for " + application);
});
return new MessageResponse(response.toString());
}
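// Illustrative call (route taken from handleDELETE above):
//   DELETE /application/v4/tenant/foo/application/bar/deploying/all
// cancels everything in progress; other {choice} values are upper-cased and must
// name a ChangesToCancel constant (the exact set of constants is not shown here).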
/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
ZoneId.from(environment, region));
Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
controller.applications().restart(deploymentId, hostname);
return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
ApplicationResource.API_PATH, applicationName,
EnvironmentResource.API_PATH, environment,
"region", region,
"instance", instanceName));
}
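// Illustrative call (hypothetical values; the POST route itself is registered in
// a part of the handler not shown here):
//   POST .../environment/prod/region/us-east-3/instance/default/restart?hostname=node1.example.test
// Omitting the hostname parameter requests a restart of the whole deployment.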
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = ZoneId.from(environment, region);
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
if ( ! dataParts.containsKey("deployOptions"))
return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
/*
* Special handling of the zone application (the only system application with an application package)
* Setting any other deployOptions here is not supported for now (e.g. specifying version), but
* this might be handy later to handle emergency downgrades.
*/
boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
if (isZoneApplication) {
String versionStr = deployOptions.field("vespaVersion").asString();
boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
if (versionPresent) {
throw new RuntimeException("Version not supported for system applications");
}
if (controller.versionStatus().isUpgrading()) {
throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
}
Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
if (systemVersion.isEmpty()) {
throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
}
ActivateResult result = controller.applications()
.deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
return new SlimeJsonResponse(toSlime(result));
}
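// For the zone application the deployOptions part is therefore effectively empty,
// e.g. {} or {"vespaVersion": "null"}: the version is always the system version
// resolved above, and an upgrade in progress aborts the deploy.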
/*
* Normal applications from here
*/
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
if (sourceRevision.valid() != buildNumber.valid())
throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
Optional<ApplicationVersion> applicationVersion = Optional.empty();
if (sourceRevision.valid()) {
if (applicationPackage.isPresent())
throw new IllegalArgumentException("Application version and application package can't both be provided.");
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
/*
* A deploy-direct request without an explicit package, application version or
* Vespa version means redeploying the current application: retrieve the version
* info from the existing deployment before deploying
*/
if (deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
Optional<Deployment> deployment = controller.applications().get(applicationId)
.map(Application::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
if ( ! deployment.isPresent())
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exists");
ApplicationVersion version = deployment.get().applicationVersion();
if (version.isUnknown())
throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
}
DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
vespaVersion,
deployOptions.field("ignoreValidationErrors").asBool(),
deployOptions.field("deployCurrentVersion").asBool());
ActivateResult result = controller.applications().deploy(applicationId,
zone,
applicationPackage,
applicationVersion,
deployOptionsJsonClass,
Optional.of(getUserPrincipal(request).getIdentity()));
return new SlimeJsonResponse(toSlime(result));
}
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
Optional<Tenant> tenant = controller.tenants().tenant(tenantName);
if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
if (tenant.get() instanceof AthenzTenant) {
controller.tenants().deleteTenant((AthenzTenant) tenant.get(),
requireOktaAccessToken(request, "Could not delete " + tenantName));
} else if (tenant.get() instanceof UserTenant) {
controller.tenants().deleteTenant((UserTenant) tenant.get());
} else {
throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() +
", for " + tenant.get());
}
return tenant(tenant.get(), request, false);
}
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
controller.applications().deleteApplication(id, getOktaAccessToken(request));
return new EmptyJsonResponse();
}
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
ApplicationResource.API_PATH, applicationName,
EnvironmentResource.API_PATH, environment,
"region", region,
"instance", instanceName));
}
/**
* Promote application Chef environments. To be used by component jobs only
*/
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
try {
ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
String sourceEnvironment = chefEnvironment.systemChefEnvironment();
String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
} catch (Exception e) {
log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
}
}
/**
* Promote application Chef environments for jobs that deploy applications
*/
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
try {
ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
} catch (Exception e) {
log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
}
}
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
try {
DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
if ( report.jobType() == JobType.component
&& controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed internally, and no " +
"longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
"to the old pipeline, please file a ticket at yo/vespa-support and request this.");
controller.applications().deploymentTrigger().notifyOfCompletion(report);
return new MessageResponse("ok");
} catch (IllegalStateException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
}
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
Optional<DeploymentJobs.JobError> jobError = Optional.empty();
if (report.field("jobError").valid()) {
jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()));
}
ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
JobType type = JobType.fromJobName(report.field("jobName").asString());
long buildNumber = report.field("buildNumber").asLong();
if (type == JobType.component)
return DeploymentJobs.JobReport.ofComponent(id,
report.field("projectId").asLong(),
buildNumber,
jobError,
toSourceRevision(report.field("sourceRevision")));
else
return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}
private static SourceRevision toSourceRevision(Inspector object) {
if (!object.field("repository").valid() ||
!object.field("branch").valid() ||
!object.field("commit").valid()) {
throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
}
return new SourceRevision(object.field("repository").asString(),
object.field("branch").asString(),
object.field("commit").asString());
}
private Tenant getTenantOrThrow(String tenantName) {
return controller.tenants().tenant(tenantName)
.orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}
private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) {
object.setString("tenant", tenant.name().value());
object.setString("type", tentantType(tenant));
if (tenant instanceof AthenzTenant) {
AthenzTenant athenzTenant = (AthenzTenant) tenant;
object.setString("athensDomain", athenzTenant.domain().getName());
object.setString("property", athenzTenant.property().id());
athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
}
Cursor applicationArray = object.setArray("applications");
if (listApplications) {
for (Application application : controller.applications().asList(tenant.name())) {
if (application.id().instance().isDefault()) {
if (recurseOverApplications(request))
toSlime(applicationArray.addObject(), application, request);
else
toSlime(application, applicationArray.addObject(), request);
}
}
}
if (tenant instanceof AthenzTenant) {
AthenzTenant athenzTenant = (AthenzTenant) tenant;
athenzTenant.contact().ifPresent(c -> {
object.setString("propertyUrl", c.propertyUrl().toString());
object.setString("contactsUrl", c.url().toString());
object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
Cursor contactsArray = object.setArray("contacts");
c.persons().forEach(persons -> {
Cursor personArray = contactsArray.addArray();
persons.forEach(personArray::addString);
});
});
}
}
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
object.setString("tenant", tenant.name().value());
Cursor metaData = object.setObject("metaData");
metaData.setString("type", tentantType(tenant));
if (tenant instanceof AthenzTenant) {
AthenzTenant athenzTenant = (AthenzTenant) tenant;
metaData.setString("athensDomain", athenzTenant.domain().getName());
metaData.setString("property", athenzTenant.property().id());
}
object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}
/** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
private URI withPath(String newPath, URI uri) {
try {
return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
}
catch (URISyntaxException e) {
throw new RuntimeException("Will not happen", e);
}
}
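// Illustrative (hypothetical) use of withPath: the query string and fragment of the
// original URI are dropped, while scheme, host and port are preserved. E.g.
// withPath("/application/v4/tenant/my-tenant",
//          URI.create("https://controller:4443/application/v4?recursive=true"))
// returns https://controller:4443/application/v4/tenant/my-tenant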
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
return Long.parseLong(valueOrNull);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
}
}
private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
object.setLong("id", jobRun.id());
object.setString("version", jobRun.platform().toFullString());
if (!jobRun.application().isUnknown())
toSlime(jobRun.application(), object.setObject("revision"));
object.setString("reason", jobRun.reason());
object.setLong("at", jobRun.at().toEpochMilli());
}
private Slime toSlime(InputStream jsonStream) {
try {
byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
return SlimeUtils.jsonToSlime(jsonBytes);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) {
AthenzIdentity identity = getUserPrincipal(request).getIdentity();
boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain);
if ( ! isDomainAdmin) {
throw new ForbiddenException(
String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName()));
}
}
private static Optional<UserId> getUserId(HttpRequest request) {
return Optional.of(getUserPrincipal(request))
.map(AthenzPrincipal::getIdentity)
.filter(AthenzUser.class::isInstance)
.map(AthenzUser.class::cast)
.map(AthenzUser::getName)
.map(UserId::new);
}
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
Principal principal = request.getJDiscRequest().getUserPrincipal();
if (principal == null) throw new InternalServerErrorException("Expected a user principal");
if (!(principal instanceof AthenzPrincipal))
throw new InternalServerErrorException(
String.format("Expected principal of type %s, got %s",
AthenzPrincipal.class.getSimpleName(), principal.getClass().getName()));
return (AthenzPrincipal) principal;
}
private Inspector mandatory(String key, Inspector object) {
if ( ! object.field(key).valid())
throw new IllegalArgumentException("'" + key + "' is missing");
return object.field(key);
}
private Optional<String> optional(String key, Inspector object) {
return SlimeUtils.optionalString(object.field(key));
}
private static String path(Object... elements) {
return Joiner.on("/").join(elements);
}
private void toSlime(Application application, Cursor object, HttpRequest request) {
object.setString("application", application.id().application().value());
object.setString("instance", application.id().instance().value());
object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
"/application/" + application.id().application().value(), request.getUri()).toString());
}
private Slime toSlime(ActivateResult result) {
Slime slime = new Slime();
Cursor object = slime.setObject();
object.setString("revisionId", result.revisionId().id());
object.setLong("applicationZipSize", result.applicationZipSizeBytes());
Cursor logArray = object.setArray("prepareMessages");
if (result.prepareResponse().log != null) {
for (Log logMessage : result.prepareResponse().log) {
Cursor logObject = logArray.addObject();
logObject.setLong("time", logMessage.time);
logObject.setString("level", logMessage.level);
logObject.setString("message", logMessage.message);
}
}
Cursor changeObject = object.setObject("configChangeActions");
Cursor restartActionsArray = changeObject.setArray("restart");
for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
Cursor restartActionObject = restartActionsArray.addObject();
restartActionObject.setString("clusterName", restartAction.clusterName);
restartActionObject.setString("clusterType", restartAction.clusterType);
restartActionObject.setString("serviceType", restartAction.serviceType);
serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
}
Cursor refeedActionsArray = changeObject.setArray("refeed");
for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
Cursor refeedActionObject = refeedActionsArray.addObject();
refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed);
refeedActionObject.setString("documentType", refeedAction.documentType);
refeedActionObject.setString("clusterName", refeedAction.clusterName);
serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
}
return slime;
}
private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
for (ServiceInfo serviceInfo : serviceInfoList) {
Cursor serviceInfoObject = array.addObject();
serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
serviceInfoObject.setString("configId", serviceInfo.configId);
serviceInfoObject.setString("hostName", serviceInfo.hostName);
}
}
private void stringsToSlime(List<String> strings, Cursor array) {
for (String string : strings)
array.addString(string);
}
private String readToString(InputStream stream) {
try (Scanner scanner = new Scanner(stream).useDelimiter("\\A")) {
return scanner.hasNext() ? scanner.next() : null;
}
}
private boolean systemHasVersion(Version version) {
return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
}
public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
object.setLong("tco", (long)deploymentCost.getTco());
object.setLong("waste", (long)deploymentCost.getWaste());
object.setDouble("utilization", deploymentCost.getUtilization());
Cursor clustersObject = object.setObject("cluster");
for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
}
private static void toSlime(ClusterCost clusterCost, Cursor object) {
object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
object.setLong("tco", (int)clusterCost.getTco());
object.setLong("waste", (int)clusterCost.getWaste());
object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
Cursor utilObject = object.setObject("util");
utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
Cursor usageObject = object.setObject("usage");
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
Cursor hostnamesArray = object.setArray("hostnames");
for (String hostname : clusterCost.getClusterInfo().getHostnames())
hostnamesArray.addString(hostname);
}
private static String getResourceName(ClusterUtilization utilization) {
String name = "cpu";
double max = utilization.getMaxUtilization();
if (utilization.getMemory() == max) {
name = "mem";
} else if (utilization.getDisk() == max) {
name = "disk";
} else if (utilization.getDiskBusy() == max) {
name = "diskbusy";
}
return name;
}
private static boolean recurseOverTenants(HttpRequest request) {
return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}
private static boolean recurseOverApplications(HttpRequest request) {
return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}
private static boolean recurseOverDeployments(HttpRequest request) {
return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}
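// The "recursive" query parameter cascades: "deployment" (or "all"/"true") implies
// recursing over applications, which in turn implies recursing over tenants. E.g.
// ?recursive=application expands tenants and their applications, but not deployments.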
private static String tenantType(Tenant tenant) {
if (tenant instanceof AthenzTenant) {
return "ATHENS";
} else if (tenant instanceof UserTenant) {
return "USER";
}
throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}
private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) {
return getOktaAccessToken(request)
.orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided"));
}
private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) {
return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token"))
.map(attribute -> new OktaAccessToken((String) attribute));
}
private static ApplicationId appIdFromPath(Path path) {
return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}
private static JobType jobTypeFromPath(Path path) {
return JobType.fromJobName(path.get("jobtype"));
}
private static RunId runIdFromPath(Path path) {
long number = Long.parseLong(path.get("number"));
return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
Map<String, byte[]> dataParts = new MultipartParser().parse(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
SourceRevision sourceRevision = toSourceRevision(submitOptions);
String authorEmail = submitOptions.field("authorEmail").asString();
long projectId = Math.max(1, submitOptions.field("projectId").asLong());
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
applicationPackage,
Optional.of(getUserPrincipal(request).getIdentity()));
return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
tenant,
application,
sourceRevision,
authorEmail,
projectId,
applicationPackage,
dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}
}
|
You only need to check up to `j < i`, or start at `i + 1`; then you won't need the `if` below...
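A minimal set-based alternative (hypothetical sketch, not the fix applied below; uses java.util.HashSet and assumes Node implements hashCode() consistently with the equals() the loop already relies on):

Set<Node> seen = new HashSet<>();
for (Node node : nodes) {
// add() returns false when the element is already present, so each duplicate
// is detected in O(1) instead of re-scanning the list pairwise.
if (!seen.add(node))
throw new IllegalArgumentException("Cannot add " + node.hostname() +
": A node with this name already exists");
}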
|
public List<Node> addNodes(List<Node> nodes) {
try (Mutex lock = lockAllocation()) {
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
var message = "Cannot add " + node.hostname() + ": A node with this name already exists";
if (getNode(node.hostname()).isPresent()) throw new IllegalArgumentException(message);
for (int j = 0; j < nodes.size(); j++) {
if (i == j) continue;
var other = nodes.get(j);
if (node.equals(other)) throw new IllegalArgumentException(message);
}
}
return db.addNodes(nodes);
}
}
|
for (int j = 0; j < nodes.size(); j++) {
|
public List<Node> addNodes(List<Node> nodes) {
try (Mutex lock = lockAllocation()) {
for (int i = 0; i < nodes.size(); i++) {
var node = nodes.get(i);
var message = "Cannot add " + node.hostname() + ": A node with this name already exists";
if (getNode(node.hostname()).isPresent()) throw new IllegalArgumentException(message);
for (int j = 0; j < i; j++) {
var other = nodes.get(j);
if (node.equals(other)) throw new IllegalArgumentException(message);
}
}
return db.addNodes(nodes);
}
}
|
class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache());
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage, boolean useCuratorClientCache) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.osVersions = new OsVersions(db);
this.infrastructureVersions = new InfrastructureVersions(db);
this.firmwareChecks = new FirmwareChecks(db, clock);
this.dockerImages = new DockerImages(db, dockerImage);
this.jobControl = new JobControl(db);
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage(NodeType nodeType) { return dockerImages.dockerImageFor(nodeType); }
/** @return The name resolver used to resolve hostnames and IP addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes are returned
* @return the nodes, or an empty list if no nodes were found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the nodes, or an empty list if no nodes of the given type were found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return new NodeList(getNodes());
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
return new LoadBalancerList(database().readLoadBalancers().values());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns the ACL for the node (trusted nodes, networks and ports)
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates, LoadBalancerList loadBalancers) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<Integer> trustedPorts = new LinkedHashSet<>();
Set<String> trustedNetworks = new LinkedHashSet<>();
node.allocation().ifPresent(allocation -> {
trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
loadBalancers.owner(allocation.owner()).asList().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::networks)
.forEach(trustedNetworks::addAll);
});
trustedPorts.add(22);
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedNodes.addAll(candidates.parentsOf(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
trustedPorts.add(4443);
break;
case proxy:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedPorts.add(443);
trustedPorts.add(4080);
trustedPorts.add(4443);
break;
case controller:
trustedPorts.add(4443);
trustedPorts.add(443);
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = list();
LoadBalancerList loadBalancers = loadBalancers();
if (children) {
return candidates.childrenOf(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates, loadBalancers))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
return Collections.singletonList(getNodeAcl(node, candidates, loadBalancers));
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> ipAddressPool, Optional<String> parentHostname,
Optional<String> modelName, Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), ipAddressPool, hostname, parentHostname, modelName, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, Optional.empty(), flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes, Mutex allocationLock) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes, Node.State.reserved);
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockAllocation()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), node -> setDirty(node, agent, reason));
}
/**
* Sets a node dirty. The node must be in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(Node node, Agent agent, String reason) {
if (node.status().hardwareFailureDescription().isPresent())
throw new IllegalArgumentException("Could not deallocate " + node.hostname() + ": It has a hardware failure");
return db.writeTo(Node.State.dirty, node, agent, Optional.of(reason));
}
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = getNode(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isDockerHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if (!hostnamesNotAllowedToDirty.isEmpty()) {
throw new IllegalArgumentException("Could not deallocate " + hostname + ": " +
String.join(", ", hostnamesNotAllowedToDirty) + " must be in either provisioned, failed or parked state");
}
return nodesToDirty.stream()
.map(node -> setDirty(node, agent, reason))
.collect(Collectors.toList());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, true, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, Node.State.active, agent, Optional.of(reason));
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, true, toState, agent, reason));
return moved;
}
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active:" +
"It has the same cluster and index as an existing node");
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/**
* This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
* containers this will remove the node from the node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty) {
throw new IllegalArgumentException(
"Cannot make " + hostname + " available for new allocation, must be in state dirty, but was in " + node.state());
}
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return List of all the nodes that have been removed
*/
public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
return removeRecursively(node, false);
}
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockAllocation()) {
List<Node> removed = new ArrayList<>();
if (node.type().isDockerHost()) {
list().childrenOf(node).asList().stream()
.filter(child -> force || canRemove(child, true))
.forEach(removed::add);
}
if (force || canRemove(node, false)) removed.add(node);
db.removeNodes(removed);
return removed;
} catch (RuntimeException e) {
throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
}
}
/**
* Returns whether given node can be removed. Removal is allowed if:
* Tenant node: node is unallocated
* Non-Docker-container node: iff in state provisioned|failed|parked
* Docker-container-node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|ready
*/
private boolean canRemove(Node node, boolean deletingAsChild) {
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
throw new IllegalArgumentException("Node is currently allocated and cannot be removed: " +
node.allocation().get());
}
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !deletingAsChild) {
if (node.state() != Node.State.ready) {
throw new IllegalArgumentException(
String.format("Docker container %s can only be removed when in ready state", node.hostname()));
}
} else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked,
Node.State.ready);
if (! legalStates.contains(node.state())) {
throw new IllegalArgumentException(String.format("Child node %s can only be removed from following states: %s",
node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
}
} else {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state())) {
throw new IllegalArgumentException(String.format("Node %s can only be removed from following states: %s",
node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
}
}
return true;
}
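// Illustrative outcomes of canRemove, matching the rules above: an allocated tenant
// node always throws; a Docker container removed directly must be ready; one removed
// via its parent may also be provisioned, failed or parked; force in removeRecursively
// bypasses these checks entirely.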
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
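// Apply the action to unallocated nodes under the global allocation lock, and to
// allocated nodes under their owning application's lock, so each node is mutated
// while holding the lock that guards it.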
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockAllocation()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to allocating nodes */
public Mutex lockAllocation() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
public Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockAllocation();
}
}
|
class NodeRepository extends AbstractComponent {
private final CuratorDatabaseClient db;
private final Clock clock;
private final Zone zone;
private final NodeFlavors flavors;
private final NameResolver nameResolver;
private final OsVersions osVersions;
private final InfrastructureVersions infrastructureVersions;
private final FirmwareChecks firmwareChecks;
private final DockerImages dockerImages;
private final JobControl jobControl;
/**
* Creates a node repository from a zookeeper provider.
* This will use the system time to make time-sensitive decisions
*/
@Inject
public NodeRepository(NodeRepositoryConfig config, NodeFlavors flavors, Curator curator, Zone zone) {
this(flavors, curator, Clock.systemUTC(), zone, new DnsNameResolver(), DockerImage.fromString(config.dockerImage()), config.useCuratorClientCache());
}
/**
* Creates a node repository from a zookeeper provider and a clock instance
* which will be used for time-sensitive decisions.
*/
public NodeRepository(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, NameResolver nameResolver,
DockerImage dockerImage, boolean useCuratorClientCache) {
this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.flavors = flavors;
this.nameResolver = nameResolver;
this.osVersions = new OsVersions(db);
this.infrastructureVersions = new InfrastructureVersions(db);
this.firmwareChecks = new FirmwareChecks(db, clock);
this.dockerImages = new DockerImages(db, dockerImage);
this.jobControl = new JobControl(db);
for (Node.State state : Node.State.values())
db.writeTo(state, db.getNodes(state), Agent.system, Optional.empty());
}
/** Returns the curator database client used by this */
public CuratorDatabaseClient database() { return db; }
/** Returns the Docker image to use for nodes in this */
public DockerImage dockerImage(NodeType nodeType) { return dockerImages.dockerImageFor(nodeType); }
/** @return The name resolver used to resolve hostnames and IP addresses */
public NameResolver nameResolver() { return nameResolver; }
/** Returns the OS versions to use for nodes in this */
public OsVersions osVersions() { return osVersions; }
/** Returns the infrastructure versions to use for nodes in this */
public InfrastructureVersions infrastructureVersions() { return infrastructureVersions; }
/** Returns the status of firmware checks for hosts managed by this. */
public FirmwareChecks firmwareChecks() { return firmwareChecks; }
/** Returns the docker images to use for nodes in this. */
public DockerImages dockerImages() { return dockerImages; }
/** Returns the status of maintenance jobs managed by this. */
public JobControl jobControl() { return jobControl; }
/**
* Finds and returns the node with the hostname in any of the given states, or empty if not found
*
* @param hostname the full host name of the node
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
public Optional<Node> getNode(String hostname, Node.State ... inState) {
return db.getNode(hostname, inState);
}
/**
* Returns all nodes in any of the given states.
*
* @param inState the states to return nodes from. If no states are given, all nodes are returned
* @return the nodes, or an empty list if no nodes were found in any of the given states
*/
public List<Node> getNodes(Node.State ... inState) {
return new ArrayList<>(db.getNodes(inState));
}
/**
* Finds and returns the nodes of the given type in any of the given states.
*
* @param type the node type to return
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
* @return the nodes, or an empty list if no nodes of the given type were found in any of the given states
*/
public List<Node> getNodes(NodeType type, Node.State ... inState) {
return db.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
}
/** Returns a filterable list of all nodes in this repository */
public NodeList list() {
return new NodeList(getNodes());
}
/** Returns a filterable list of all load balancers in this repository */
public LoadBalancerList loadBalancers() {
return new LoadBalancerList(database().readLoadBalancers().values());
}
public List<Node> getNodes(ApplicationId id, Node.State ... inState) { return db.getNodes(id, inState); }
public List<Node> getInactive() { return db.getNodes(Node.State.inactive); }
public List<Node> getFailed() { return db.getNodes(Node.State.failed); }
/**
* Returns the ACL for the node (trusted nodes, networks and ports)
*/
private NodeAcl getNodeAcl(Node node, NodeList candidates, LoadBalancerList loadBalancers) {
Set<Node> trustedNodes = new TreeSet<>(Comparator.comparing(Node::hostname));
Set<Integer> trustedPorts = new LinkedHashSet<>();
Set<String> trustedNetworks = new LinkedHashSet<>();
node.allocation().ifPresent(allocation -> {
trustedNodes.addAll(candidates.owner(allocation.owner()).asList());
loadBalancers.owner(allocation.owner()).asList().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::networks)
.forEach(trustedNetworks::addAll);
});
trustedPorts.add(22);
switch (node.type()) {
case tenant:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedNodes.addAll(candidates.parentsOf(trustedNodes).asList());
trustedNodes.addAll(candidates.nodeType(NodeType.proxy).asList());
if (node.state() == Node.State.ready) {
trustedNodes.addAll(candidates.nodeType(NodeType.tenant).asList());
}
break;
case config:
trustedNodes.addAll(candidates.asList());
trustedPorts.add(4443);
break;
case proxy:
trustedNodes.addAll(candidates.nodeType(NodeType.config).asList());
trustedPorts.add(443);
trustedPorts.add(4080);
trustedPorts.add(4443);
break;
case controller:
trustedPorts.add(4443);
trustedPorts.add(443);
break;
default:
throw new IllegalArgumentException(
String.format("Don't know how to create ACL for node [hostname=%s type=%s]",
node.hostname(), node.type()));
}
return new NodeAcl(node, trustedNodes, trustedNetworks, trustedPorts);
}
/**
* Creates a list of node ACLs which identify which nodes the given node should trust
*
* @param node Node for which to generate ACLs
* @param children Return ACLs for the children of the given node (e.g. containers on a Docker host)
* @return List of node ACLs
*/
public List<NodeAcl> getNodeAcls(Node node, boolean children) {
NodeList candidates = list();
LoadBalancerList loadBalancers = loadBalancers();
if (children) {
return candidates.childrenOf(node).asList().stream()
.map(childNode -> getNodeAcl(childNode, candidates, loadBalancers))
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
return Collections.singletonList(getNodeAcl(node, candidates, loadBalancers));
}
public NodeFlavors getAvailableFlavors() {
return flavors;
}
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> ipAddressPool, Optional<String> parentHostname,
Optional<String> modelName, Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), ipAddressPool, hostname, parentHostname, modelName, flavor, type);
}
public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, Optional.empty(), flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
return createNode(openStackId, hostname, Collections.emptySet(), parentHostname, flavor, type);
}
/** Adds a list of newly created docker container nodes to the node repository as <i>reserved</i> nodes */
public List<Node> addDockerNodes(List<Node> nodes, Mutex allocationLock) {
for (Node node : nodes) {
if (!node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": This is not a docker node");
}
if (!node.allocation().isPresent()) {
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": Docker containers needs to be allocated");
}
Optional<Node> existing = getNode(node.hostname());
if (existing.isPresent())
throw new IllegalArgumentException("Cannot add " + node.hostname() + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
node + ", " + node.history());
}
return db.addNodesInState(nodes, Node.State.reserved);
}
/** Adds a list of (newly created) nodes to the node repository as <i>provisioned</i> nodes */
/** Sets a list of nodes ready and returns the nodes in the ready state */
public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
try (Mutex lock = lockAllocation()) {
List<Node> nodesWithResetFields = nodes.stream()
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
throw new IllegalArgumentException("Can not set " + node + " ready. It is not provisioned or dirty.");
return node.with(node.status().withWantToRetire(false).withWantToDeprovision(false));
})
.collect(Collectors.toList());
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
public Node setReady(String hostname, Agent agent, String reason) {
Node nodeToReady = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
return setReady(Collections.singletonList(nodeToReady), agent, reason).get(0);
}
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
}
/** Activate nodes. This method does <b>not</b> lock the node repository */
public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
* @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state.
*/
public void setRemovable(ApplicationId application, List<Node> nodes) {
try (Mutex lock = lock(application)) {
List<Node> removableNodes =
nodes.stream().map(node -> node.with(node.allocation().get().removable()))
.collect(Collectors.toList());
write(removableNodes);
}
}
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
db.writeTo(Node.State.inactive,
db.getNodes(application, Node.State.reserved, Node.State.active),
Agent.application, Optional.empty(), transaction
);
}
}
/**
* Deactivates these nodes in a transaction and returns
* the nodes in the new state which will hold if the transaction commits.
* This method does <b>not</b> lock
*/
public List<Node> deactivate(List<Node> nodes, NestedTransaction transaction) {
return db.writeTo(Node.State.inactive, nodes, Agent.application, Optional.empty(), transaction);
}
/** Move nodes to the dirty state */
public List<Node> setDirty(List<Node> nodes, Agent agent, String reason) {
return performOn(NodeListFilter.from(nodes), node -> setDirty(node, agent, reason));
}
/**
* Sets a node dirty. The node must be in the provisioned, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*
* @throws IllegalArgumentException if the node has hardware failure
*/
public Node setDirty(Node node, Agent agent, String reason) {
if (node.status().hardwareFailureDescription().isPresent())
throw new IllegalArgumentException("Could not deallocate " + node.hostname() + ": It has a hardware failure");
return db.writeTo(Node.State.dirty, node, agent, Optional.of(reason));
}
public List<Node> dirtyRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = getNode(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
(nodeToDirty.type().isDockerHost() ?
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
Stream.of(nodeToDirty))
.filter(node -> node.state() != Node.State.dirty)
.collect(Collectors.toList());
List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
.filter(node -> node.state() != Node.State.provisioned)
.filter(node -> node.state() != Node.State.failed)
.filter(node -> node.state() != Node.State.parked)
.map(Node::hostname)
.collect(Collectors.toList());
if (!hostnamesNotAllowedToDirty.isEmpty()) {
throw new IllegalArgumentException("Could not deallocate " + hostname + ": " +
String.join(", ", hostnamesNotAllowedToDirty) + " must be in either provisioned, failed or parked state");
}
return nodesToDirty.stream()
.map(node -> setDirty(node, agent, reason))
.collect(Collectors.toList());
}
/**
* Fails this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node fail(String hostname, Agent agent, String reason) {
return move(hostname, true, Node.State.failed, agent, Optional.of(reason));
}
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
*
* @return List of all the failed nodes in their new state
*/
public List<Node> failRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
}
/**
* Parks this node and returns it in its new state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
return move(hostname, keepAllocation, Node.State.parked, agent, Optional.of(reason));
}
/**
* Parks all the nodes that are children of hostname before finally parking the hostname itself.
*
* @return List of all the parked nodes in their new state
*/
public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
}
/**
* Moves a previously failed or parked node back to the active state.
*
* @return the node in its new state
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
return move(hostname, true, Node.State.active, agent, Optional.of(reason));
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
List<Node> moved = list().childrenOf(hostname).asList().stream()
.map(child -> move(child, toState, agent, reason))
.collect(Collectors.toList());
moved.add(move(hostname, true, toState, agent, reason));
return moved;
}
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason) {
Node node = getNode(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
node = node.withoutAllocation();
}
return move(node, toState, agent, reason);
}
private Node move(Node node, Node.State toState, Agent agent, Optional<String> reason) {
if (toState == Node.State.active && ! node.allocation().isPresent())
throw new IllegalArgumentException("Could not set " + node.hostname() + " active. It has no allocation.");
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
throw new IllegalArgumentException("Could not move " + node + " to active: " +
"It has the same cluster and index as an existing node");
}
}
return db.writeTo(toState, node, agent, reason);
}
}
/*
* This method is used by the REST API to handle readying nodes for new allocations. For tenant docker
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty) {
throw new IllegalArgumentException(
"Cannot make " + hostname + " available for new allocation, must be in state dirty, but was in " + node.state());
}
return removeRecursively(node, true).get(0);
}
if (node.state() == Node.State.ready) return node;
return setReady(Collections.singletonList(node), agent, reason).get(0);
}
/**
* Removes all the nodes that are children of hostname before finally removing the hostname itself.
*
* @return List of all the nodes that have been removed
*/
public List<Node> removeRecursively(String hostname) {
Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname \"" + hostname + '"'));
return removeRecursively(node, false);
}
public List<Node> removeRecursively(Node node, boolean force) {
try (Mutex lock = lockAllocation()) {
List<Node> removed = new ArrayList<>();
if (node.type().isDockerHost()) {
list().childrenOf(node).asList().stream()
.filter(child -> force || canRemove(child, true))
.forEach(removed::add);
}
if (force || canRemove(node, false)) removed.add(node);
db.removeNodes(removed);
return removed;
} catch (RuntimeException e) {
throw new IllegalArgumentException("Failed to delete " + node.hostname(), e);
}
}
/**
* Returns whether given node can be removed. Removal is allowed if:
* Tenant node: node is unallocated
* Non-Docker-container node: iff in state provisioned|failed|parked
* Docker-container-node:
* If only removing the container node: node in state ready
* If also removing the parent node: child is in state provisioned|failed|parked|ready
*/
private boolean canRemove(Node node, boolean deletingAsChild) {
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
throw new IllegalArgumentException("Node is currently allocated and cannot be removed: " +
node.allocation().get());
}
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && !deletingAsChild) {
if (node.state() != Node.State.ready) {
throw new IllegalArgumentException(
String.format("Docker container %s can only be removed when in ready state", node.hostname()));
}
} else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked,
Node.State.ready);
if (! legalStates.contains(node.state())) {
throw new IllegalArgumentException(String.format("Child node %s can only be removed from following states: %s",
node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
}
} else {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
if (! legalStates.contains(node.state())) {
throw new IllegalArgumentException(String.format("Node %s can only be removed from following states: %s",
node.hostname(), legalStates.stream().map(Node.State::name).collect(Collectors.joining(", "))));
}
}
return true;
}
/**
* Increases the restart generation of the active nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> restart(NodeFilter filter) {
return performOn(StateFilter.from(Node.State.active, filter), node -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted())));
}
/**
* Increases the reboot generation of the nodes matching the filter.
* Returns the nodes in their new state.
*/
public List<Node> reboot(NodeFilter filter) {
return performOn(filter, node -> write(node.withReboot(node.status().reboot().withIncreasedWanted())));
}
/**
* Writes this node after it has changed some internal state but NOT changed its state field.
* This does NOT lock the node repository.
*
* @return the written node for convenience
*/
public Node write(Node node) { return db.writeTo(node.state(), node, Agent.system, Optional.empty()); }
/**
* Writes these nodes after they have changed some internal state but NOT changed their state field.
* This does NOT lock the node repository.
*
* @return the written nodes for convenience
*/
public List<Node> write(List<Node> nodes) { return db.writeTo(nodes, Agent.system, Optional.empty()); }
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
* @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeFilter filter, UnaryOperator<Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
for (Node node : db.getNodes()) {
if ( ! filter.matches(node)) continue;
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
unallocatedNodes.add(node);
}
List<Node> resultingNodes = new ArrayList<>();
try (Mutex lock = lockAllocation()) {
for (Node node : unallocatedNodes)
resultingNodes.add(action.apply(node));
}
for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
try (Mutex lock = lock(applicationNodes.getKey())) {
for (Node node : applicationNodes.getValue())
resultingNodes.add(action.apply(node));
}
}
return resultingNodes;
}
/** Returns the time keeper of this system */
public Clock clock() { return clock; }
/** Returns the zone of this system */
public Zone zone() { return zone; }
/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }
/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }
/** Create a lock which provides exclusive rights to allocating nodes */
public Mutex lockAllocation() { return db.lockInactive(); }
/** Acquires the appropriate lock for this node */
public Mutex lock(Node node) {
return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockAllocation();
}
}
|
For this test, we should be able to instantiate just the encryption options class. We don't need to create an encrypted blob client to exercise the validation logic.
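Concretely, the test then needs nothing but the options object, along the lines of this sketch (assuming JUnit 5; the test-class name and the package import are my assumptions, not taken from the diff):

```java
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

// Package assumed to match the builder under discussion.
import com.azure.storage.blob.specialized.cryptography.BlobClientSideEncryptionOptions;

class RegionLengthValidationSketch {
    // No EncryptedBlobClientBuilder involved: the setter itself is expected
    // to validate the region length eagerly and throw on illegal values.
    @ParameterizedTest
    @ValueSource(longs = {0, -1, 15})
    void rejectsIllegalRegionLength(long regionLength) {
        assertThrows(IllegalArgumentException.class,
                () -> new BlobClientSideEncryptionOptions()
                        .setAuthenticatedRegionDataLengthInBytes(regionLength));
    }
}
```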
|
public void illegalRegionLength(long regionLength) {
assertThrows(IllegalArgumentException.class, () ->
new EncryptedBlobClientBuilder(EncryptionVersion.V2)
.blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength))
.buildEncryptedBlobClient());
}
|
.blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength))
|
public void illegalRegionLength(long regionLength) {
assertThrows(IllegalArgumentException.class, () -> new BlobClientSideEncryptionOptions()
.setAuthenticatedRegionDataLengthInBytes(regionLength));
}
|
class EncryptedBlobClientBuilderTests {
private static final StorageSharedKeyCredential CREDENTIALS =
new StorageSharedKeyCredential("accountName", "accountKey");
private static final String ENDPOINT = "https:
private static final RequestRetryOptions REQUEST_RETRY_OPTIONS = new RequestRetryOptions(RetryPolicyType.FIXED, 2,
2, 1000L, 4000L, null);
private static final RetryOptions CORE_RETRY_OPTIONS = new RetryOptions(new FixedDelayOptions(1,
Duration.ofSeconds(1)));
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName");
private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion");
private static final List<Header> CLIENT_OPTIONS_HEADERS;
private static final Map<HttpHeaderName, String> HEADERS_MAP;
static {
CLIENT_OPTIONS_HEADERS = new ArrayList<>();
CLIENT_OPTIONS_HEADERS.add(new Header("custom", "header"));
CLIENT_OPTIONS_HEADERS.add(new Header("Authorization", "notthis"));
CLIENT_OPTIONS_HEADERS.add(new Header("User-Agent", "overwritten"));
HEADERS_MAP = new LinkedHashMap<>();
HEADERS_MAP.put(HttpHeaderName.fromString("custom"), "header");
HEADERS_MAP.put(HttpHeaderName.AUTHORIZATION, "notthis");
HEADERS_MAP.put(HttpHeaderName.USER_AGENT, "overwritten");
}
private byte[] randomData;
@BeforeEach
public void setup() {
randomData = new byte[256];
new SecureRandom().nextBytes(randomData);
}
static HttpRequest request(String url) {
return new HttpRequest(HttpMethod.HEAD, url);
}
/**
* Tests that a new date will be applied to every retry when using the encrypted blob client builder's default
* pipeline.
*/
@Test
public void encryptedBlobClientFreshDateOnRetry() {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName("blob")
.credential(CREDENTIALS)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.httpClient(new FreshDateTestClient())
.retryOptions(REQUEST_RETRY_OPTIONS)
.buildEncryptedBlobClient();
StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl())))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
/**
* Tests that a user application id will be honored in the UA string when using the encrypted blob client builder's default
* pipeline.
*/
@ParameterizedTest
@CsvSource(value = {"log-options-id,,log-options-id", ",client-options-id,client-options-id",
"log-options-id,client-options-id,client-options-id" /* Client options preferred over log options */})
public void encryptedBlobClientCustomApplicationIdInUAString(String logOptionsUA, String clientOptionsUA,
String expectedUA) {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName("blob")
.credential(CREDENTIALS)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.httpClient(new ApplicationIdUAStringTestClient(expectedUA))
.httpLogOptions(new HttpLogOptions().setApplicationId(logOptionsUA))
.clientOptions(new ClientOptions().setApplicationId(clientOptionsUA))
.buildEncryptedBlobClient();
StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl())))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
/**
* Tests that custom headers will be honored when using the encrypted blob client builder's default
* pipeline.
*/
@Test
public void encryptedBlobClientCustomHeadersClientOptions() {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName("blob")
.credential(CREDENTIALS)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.httpClient(new ClientOptionsHeadersTestClient(HEADERS_MAP))
.clientOptions(new ClientOptions().setHeaders(CLIENT_OPTIONS_HEADERS))
.buildEncryptedBlobClient();
StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl())))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@Test
public void doesNotThrowOnAmbiguousCredentialsWithoutAzureSasCredential() {
assertDoesNotThrow(() -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName("foo")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new StorageSharedKeyCredential("foo", "bar"))
.credential(new MockTokenCredential())
.sasToken("foo")
.buildEncryptedBlobClient());
}
@Test
public void throwsOnAmbiguousCredentialsWithAzureSasCredential() {
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new StorageSharedKeyCredential("foo", "bar"))
.credential(new AzureSasCredential("foo"))
.buildEncryptedBlobClient());
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new MockTokenCredential())
.credential(new AzureSasCredential("foo"))
.buildEncryptedBlobClient());
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.sasToken("foo")
.credential(new AzureSasCredential("foo"))
.buildEncryptedBlobClient());
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT + "?sig=foo")
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new AzureSasCredential("foo"))
.buildEncryptedBlobClient());
}
@Test
public void onlyOneRetryOptionsCanBeApplied() {
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("foo")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.retryOptions(REQUEST_RETRY_OPTIONS)
.retryOptions(CORE_RETRY_OPTIONS)
.buildEncryptedBlobClient());
}
@Test
public void constructFromBlobClientBlobUserAgentModificationPolicy() {
BlobClient blobClient = new BlobClientBuilder()
.endpoint(ENDPOINT)
.credential(CREDENTIALS)
.blobName("foo")
.containerName("container")
.httpClient(new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*"))
.buildClient();
EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder()
.blobClient(blobClient)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.buildEncryptedBlobClient();
sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl());
}
@Test
public void constructFromNoClientBlobUserAgentModificationPolicy() {
EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new AzureSasCredential("foo"))
.httpClient(new UAStringTestClient("azstorage-clientsideencryption/1.0 azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*"))
.buildEncryptedBlobClient();
sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl());
}
private static Stream<Arguments> getNonEncodedBlobNameSupplier() {
return Stream.of(
Arguments.of("test%test"),
Arguments.of("ab2a7d5f-b973-4222-83ba-d0581817a819 %Россия 한국 中国!?/file"),
Arguments.of("%E6%96%91%E9%BB%9E"),
Arguments.of("斑點"));
}
@ParameterizedTest
@MethodSource("getNonEncodedBlobNameSupplier")
public void getNonEncodedBlobName(String originalBlobName) {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName(originalBlobName)
.credential(CREDENTIALS)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.buildEncryptedBlobClient();
assertEquals(encryptedBlobClient.getBlobName(), originalBlobName);
String encodedName = Utility.urlEncode(originalBlobName);
assertTrue(encryptedBlobClient.getBlobUrl().contains(encodedName));
}
@ParameterizedTest
@ValueSource(longs = { 0, -1, 15, 4L * Constants.GB })
public void illegalRegionLength(long regionLength) {
assertThrows(IllegalArgumentException.class, () ->
new EncryptedBlobClientBuilder(EncryptionVersion.V2)
.blobEncryptionOptions(new BlobEncryptionOptions().setAuthenticatedRegionDataLength(regionLength))
.buildEncryptedBlobClient());
}
private static void sendAndValidateUserAgentHeader(HttpPipeline pipeline, String url) {
boolean foundPolicy = false;
for (int i = 0; i < pipeline.getPolicyCount(); i++) {
foundPolicy |= (pipeline.getPolicy(i) instanceof BlobUserAgentModificationPolicy);
}
assertTrue(foundPolicy);
StepVerifier.create(pipeline.send(request(url)))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
private static final class UAStringTestClient implements HttpClient {
private final Pattern pattern;
UAStringTestClient(String regex) {
this.pattern = Pattern.compile(regex);
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(HttpHeaderName.USER_AGENT))) {
throw new RuntimeException("Failed to set 'User-Agent' header.");
}
Matcher matcher = pattern.matcher(request.getHeaders().getValue(HttpHeaderName.USER_AGENT));
assertTrue(matcher.matches());
return Mono.just(new MockHttpResponse(request, 200));
}
}
private static final class FreshDateTestClient implements HttpClient {
private String firstDate;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (firstDate == null) {
firstDate = request.getHeaders().getValue(HttpHeaderName.DATE);
return Mono.error(new IOException("IOException!"));
}
assertNotEquals(firstDate, request.getHeaders().getValue(HttpHeaderName.DATE));
return Mono.just(new MockHttpResponse(request, 200));
}
}
private static final class ApplicationIdUAStringTestClient implements HttpClient {
private final String expectedUA;
ApplicationIdUAStringTestClient(String expectedUA) {
this.expectedUA = expectedUA;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
assertTrue(request.getHeaders().getValue(HttpHeaderName.USER_AGENT).startsWith(expectedUA));
return Mono.just(new MockHttpResponse(request, 200));
}
}
private static final class ClientOptionsHeadersTestClient implements HttpClient {
private final Map<HttpHeaderName, String> headers;
ClientOptionsHeadersTestClient(Map<HttpHeaderName, String> headers) {
this.headers = headers;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
headers.forEach((name, value) -> {
if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(name))) {
throw new RuntimeException("Failed to set custom header " + name);
}
if (name == HttpHeaderName.AUTHORIZATION) {
if (Objects.equals(value, request.getHeaders().getValue(HttpHeaderName.AUTHORIZATION))) {
throw new RuntimeException("Custom header " + name + " did not match expectation.");
}
} else {
if (!Objects.equals(value, request.getHeaders().getValue(name))) {
throw new RuntimeException("Custom header " + name + " did not match expectation.");
}
}
});
return Mono.just(new MockHttpResponse(request, 200));
}
}
}
|
class EncryptedBlobClientBuilderTests {
private static final StorageSharedKeyCredential CREDENTIALS =
new StorageSharedKeyCredential("accountName", "accountKey");
private static final String ENDPOINT = "https:
private static final RequestRetryOptions REQUEST_RETRY_OPTIONS = new RequestRetryOptions(RetryPolicyType.FIXED, 2,
2, 1000L, 4000L, null);
private static final RetryOptions CORE_RETRY_OPTIONS = new RetryOptions(new FixedDelayOptions(1,
Duration.ofSeconds(1)));
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName");
private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion");
private static final List<Header> CLIENT_OPTIONS_HEADERS;
private static final Map<HttpHeaderName, String> HEADERS_MAP;
static {
CLIENT_OPTIONS_HEADERS = new ArrayList<>();
CLIENT_OPTIONS_HEADERS.add(new Header("custom", "header"));
CLIENT_OPTIONS_HEADERS.add(new Header("Authorization", "notthis"));
CLIENT_OPTIONS_HEADERS.add(new Header("User-Agent", "overwritten"));
HEADERS_MAP = new LinkedHashMap<>();
HEADERS_MAP.put(HttpHeaderName.fromString("custom"), "header");
HEADERS_MAP.put(HttpHeaderName.AUTHORIZATION, "notthis");
HEADERS_MAP.put(HttpHeaderName.USER_AGENT, "overwritten");
}
private byte[] randomData;
@BeforeEach
public void setup() {
randomData = new byte[256];
new SecureRandom().nextBytes(randomData);
}
static HttpRequest request(String url) {
return new HttpRequest(HttpMethod.HEAD, url);
}
/**
* Tests that a new date will be applied to every retry when using the encrypted blob client builder's default
* pipeline.
*/
@Test
public void encryptedBlobClientFreshDateOnRetry() {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName("blob")
.credential(CREDENTIALS)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.httpClient(new FreshDateTestClient())
.retryOptions(REQUEST_RETRY_OPTIONS)
.buildEncryptedBlobClient();
StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl())))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
/**
* Tests that a user application id will be honored in the UA string when using the encrypted blob client builder's default
* pipeline.
*/
@ParameterizedTest
@CsvSource(value = {"log-options-id,,log-options-id", ",client-options-id,client-options-id",
"log-options-id,client-options-id,client-options-id" /* Client options preferred over log options */})
public void encryptedBlobClientCustomApplicationIdInUAString(String logOptionsUA, String clientOptionsUA,
String expectedUA) {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName("blob")
.credential(CREDENTIALS)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.httpClient(new ApplicationIdUAStringTestClient(expectedUA))
.httpLogOptions(new HttpLogOptions().setApplicationId(logOptionsUA))
.clientOptions(new ClientOptions().setApplicationId(clientOptionsUA))
.buildEncryptedBlobClient();
StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl())))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
/**
* Tests that custom headers will be honored when using the encrypted blob client builder's default
* pipeline.
*/
@Test
public void encryptedBlobClientCustomHeadersClientOptions() {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName("blob")
.credential(CREDENTIALS)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.httpClient(new ClientOptionsHeadersTestClient(HEADERS_MAP))
.clientOptions(new ClientOptions().setHeaders(CLIENT_OPTIONS_HEADERS))
.buildEncryptedBlobClient();
StepVerifier.create(encryptedBlobClient.getHttpPipeline().send(request(encryptedBlobClient.getBlobUrl())))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
@Test
public void doesNotThrowOnAmbiguousCredentialsWithoutAzureSasCredential() {
assertDoesNotThrow(() -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName("foo")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new StorageSharedKeyCredential("foo", "bar"))
.credential(new MockTokenCredential())
.sasToken("foo")
.buildEncryptedBlobClient());
}
@Test
public void throwsOnAmbiguousCredentialsWithAzureSasCredential() {
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new StorageSharedKeyCredential("foo", "bar"))
.credential(new AzureSasCredential("foo"))
.buildEncryptedBlobClient());
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new MockTokenCredential())
.credential(new AzureSasCredential("foo"))
.buildEncryptedBlobClient());
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.sasToken("foo")
.credential(new AzureSasCredential("foo"))
.buildEncryptedBlobClient());
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT + "?sig=foo")
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new AzureSasCredential("foo"))
.buildEncryptedBlobClient());
}
@Test
public void onlyOneRetryOptionsCanBeApplied() {
assertThrows(IllegalStateException.class, () -> new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("foo")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.retryOptions(REQUEST_RETRY_OPTIONS)
.retryOptions(CORE_RETRY_OPTIONS)
.buildEncryptedBlobClient());
}
@Test
public void constructFromBlobClientBlobUserAgentModificationPolicy() {
BlobClient blobClient = new BlobClientBuilder()
.endpoint(ENDPOINT)
.credential(CREDENTIALS)
.blobName("foo")
.containerName("container")
.httpClient(new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*"))
.buildClient();
EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder()
.blobClient(blobClient)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.buildEncryptedBlobClient();
sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl());
}
@Test
public void constructFromNoClientBlobUserAgentModificationPolicy() {
EncryptedBlobClient cryptoClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.credential(new AzureSasCredential("foo"))
.httpClient(new UAStringTestClient("azstorage-clientsideencryption/1.0 azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*"))
.buildEncryptedBlobClient();
sendAndValidateUserAgentHeader(cryptoClient.getHttpPipeline(), cryptoClient.getBlobUrl());
}
private static Stream<Arguments> getNonEncodedBlobNameSupplier() {
return Stream.of(
Arguments.of("test%test"),
Arguments.of("ab2a7d5f-b973-4222-83ba-d0581817a819 %Россия 한국 中国!?/file"),
Arguments.of("%E6%96%91%E9%BB%9E"),
Arguments.of("斑點"));
}
@ParameterizedTest
@MethodSource("getNonEncodedBlobNameSupplier")
public void getNonEncodedBlobName(String originalBlobName) {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder()
.endpoint(ENDPOINT)
.containerName("container")
.blobName(originalBlobName)
.credential(CREDENTIALS)
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.buildEncryptedBlobClient();
assertEquals(encryptedBlobClient.getBlobName(), originalBlobName);
String encodedName = Utility.urlEncode(originalBlobName);
assertTrue(encryptedBlobClient.getBlobUrl().contains(encodedName));
}
@ParameterizedTest
@ValueSource(longs = { 0, -1, 15, 4L * Constants.GB })
public void illegalRegionLength(long regionLength) {
assertThrows(IllegalArgumentException.class, () -> new BlobClientSideEncryptionOptions()
.setAuthenticatedRegionDataLengthInBytes(regionLength));
}
@ParameterizedTest
@ValueSource(longs = { 16, 4 * Constants.KB, 4 * Constants.MB, Constants.GB })
public void encryptedRegionLength(long regionLength) {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder(EncryptionVersion.V2)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.clientSideEncryptionOptions(new BlobClientSideEncryptionOptions()
.setAuthenticatedRegionDataLengthInBytes(regionLength))
.buildEncryptedBlobClient();
assertEquals(regionLength, encryptedBlobClient.getClientSideEncryptionOptions().getAuthenticatedRegionDataLengthInBytes());
}
@Test
public void encryptedRegionLengthDefault() {
EncryptedBlobClient encryptedBlobClient = new EncryptedBlobClientBuilder(EncryptionVersion.V2)
.blobName("foo")
.containerName("container")
.key(new FakeKey("keyId", randomData), "keyWrapAlgorithm")
.buildEncryptedBlobClient();
assertEquals(4 * Constants.MB, encryptedBlobClient.getClientSideEncryptionOptions().getAuthenticatedRegionDataLengthInBytes());
}
private static void sendAndValidateUserAgentHeader(HttpPipeline pipeline, String url) {
boolean foundPolicy = false;
for (int i = 0; i < pipeline.getPolicyCount(); i++) {
foundPolicy |= (pipeline.getPolicy(i) instanceof BlobUserAgentModificationPolicy);
}
assertTrue(foundPolicy);
StepVerifier.create(pipeline.send(request(url)))
.assertNext(response -> assertEquals(200, response.getStatusCode()))
.verifyComplete();
}
private static final class UAStringTestClient implements HttpClient {
private final Pattern pattern;
UAStringTestClient(String regex) {
this.pattern = Pattern.compile(regex);
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(HttpHeaderName.USER_AGENT))) {
throw new RuntimeException("Failed to set 'User-Agent' header.");
}
Matcher matcher = pattern.matcher(request.getHeaders().getValue(HttpHeaderName.USER_AGENT));
assertTrue(matcher.matches());
return Mono.just(new MockHttpResponse(request, 200));
}
}
private static final class FreshDateTestClient implements HttpClient {
private String firstDate;
@Override
public Mono<HttpResponse> send(HttpRequest request) {
if (firstDate == null) {
firstDate = request.getHeaders().getValue(HttpHeaderName.DATE);
return Mono.error(new IOException("IOException!"));
}
assertNotEquals(firstDate, request.getHeaders().getValue(HttpHeaderName.DATE));
return Mono.just(new MockHttpResponse(request, 200));
}
}
private static final class ApplicationIdUAStringTestClient implements HttpClient {
private final String expectedUA;
ApplicationIdUAStringTestClient(String expectedUA) {
this.expectedUA = expectedUA;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
assertTrue(request.getHeaders().getValue(HttpHeaderName.USER_AGENT).startsWith(expectedUA));
return Mono.just(new MockHttpResponse(request, 200));
}
}
private static final class ClientOptionsHeadersTestClient implements HttpClient {
private final Map<HttpHeaderName, String> headers;
ClientOptionsHeadersTestClient(Map<HttpHeaderName, String> headers) {
this.headers = headers;
}
@Override
public Mono<HttpResponse> send(HttpRequest request) {
headers.forEach((name, value) -> {
if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(name))) {
throw new RuntimeException("Failed to set custom header " + name);
}
if (name == HttpHeaderName.AUTHORIZATION) {
if (Objects.equals(value, request.getHeaders().getValue(HttpHeaderName.AUTHORIZATION))) {
throw new RuntimeException("Custom header " + name + " did not match expectation.");
}
} else {
if (!Objects.equals(value, request.getHeaders().getValue(name))) {
throw new RuntimeException("Custom header " + name + " did not match expectation.");
}
}
});
return Mono.just(new MockHttpResponse(request, 200));
}
}
}
|
Thanks for your suggestions. I'll add a test case.
|
public void registerCatalog(String catalogName, Catalog catalog) {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(catalogName), "Catalog name cannot be null or empty.");
checkNotNull(catalog, "Catalog cannot be null");
if (catalogs.containsKey(catalogName)) {
throw new CatalogException(format("Catalog %s already exists.", catalogName));
}
catalog.open();
catalogs.put(catalogName, catalog);
}
|
catalogs.put(catalogName, catalog);
|
public void registerCatalog(String catalogName, Catalog catalog) {
checkArgument(!StringUtils.isNullOrWhitespaceOnly(catalogName), "Catalog name cannot be null or empty.");
checkNotNull(catalog, "Catalog cannot be null");
if (catalogs.containsKey(catalogName)) {
throw new CatalogException(format("Catalog %s already exists.", catalogName));
}
catalog.open();
catalogs.put(catalogName, catalog);
}
|
class Builder {
private @Nullable ClassLoader classLoader;
private @Nullable ReadableConfig config;
private @Nullable String defaultCatalogName;
private @Nullable Catalog defaultCatalog;
private @Nullable ExecutionConfig executionConfig;
public Builder classLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
return this;
}
public Builder config(ReadableConfig config) {
this.config = config;
return this;
}
public Builder defaultCatalog(String defaultCatalogName, Catalog defaultCatalog) {
this.defaultCatalogName = defaultCatalogName;
this.defaultCatalog = defaultCatalog;
return this;
}
public Builder executionConfig(ExecutionConfig executionConfig) {
this.executionConfig = executionConfig;
return this;
}
public CatalogManager build() {
checkNotNull(classLoader, "Class loader cannot be null");
checkNotNull(config, "Config cannot be null");
return new CatalogManager(
defaultCatalogName,
defaultCatalog,
new DataTypeFactoryImpl(classLoader, config, executionConfig));
}
}
|
class Builder {
private @Nullable ClassLoader classLoader;
private @Nullable ReadableConfig config;
private @Nullable String defaultCatalogName;
private @Nullable Catalog defaultCatalog;
private @Nullable ExecutionConfig executionConfig;
public Builder classLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
return this;
}
public Builder config(ReadableConfig config) {
this.config = config;
return this;
}
public Builder defaultCatalog(String defaultCatalogName, Catalog defaultCatalog) {
this.defaultCatalogName = defaultCatalogName;
this.defaultCatalog = defaultCatalog;
return this;
}
public Builder executionConfig(ExecutionConfig executionConfig) {
this.executionConfig = executionConfig;
return this;
}
public CatalogManager build() {
checkNotNull(classLoader, "Class loader cannot be null");
checkNotNull(config, "Config cannot be null");
return new CatalogManager(
defaultCatalogName,
defaultCatalog,
new DataTypeFactoryImpl(classLoader, config, executionConfig));
}
}
|
Just curious: why do we need to remove the following? ``` ::copy : Function.identity()); ```
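For context, the two alternatives in play are selecting a copy function at construction time (`serializer::copy` when object reuse is on, an identity function otherwise) versus passing the serializer itself, or null, and letting the sink decide. A minimal sketch of both styles, with `RowCopier` as a hypothetical stand-in for Flink's `TypeSerializer` (names here are illustrative, not the actual API):

```java
import java.util.function.Function;

final class CopyStyleSketch {

    interface RowCopier<T> {
        T copy(T value); // deep copy, analogous to TypeSerializer#copy
    }

    // One style: pick the copy function up front. With object reuse
    // enabled a deep copy is required; otherwise identity is enough.
    static <T> Function<T, T> eagerChoice(boolean objectReuse, RowCopier<T> copier) {
        return objectReuse ? copier::copy : Function.identity();
    }

    // Other style: hand over the copier itself, or null when no copying
    // is needed, and let the sink make the decision internally.
    static <T> RowCopier<T> deferredChoice(boolean objectReuse, RowCopier<T> copier) {
        return objectReuse ? copier : null;
    }
}
```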
|
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
final SerializationSchema<RowData> keySerialization =
createSerialization(context, keyEncodingFormat, keyProjection, keyPrefix);
final SerializationSchema<RowData> valueSerialization =
createSerialization(context, valueEncodingFormat, valueProjection, null);
final KafkaSinkBuilder<RowData> sinkBuilder = KafkaSink.builder();
final List<LogicalType> physicalChildren = physicalDataType.getLogicalType().getChildren();
if (transactionalIdPrefix != null) {
sinkBuilder.setTransactionalIdPrefix(transactionalIdPrefix);
}
final KafkaSink<RowData> kafkaSink =
sinkBuilder
.setDeliverGuarantee(deliveryGuarantee)
.setBootstrapServers(
properties.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG).toString())
.setKafkaProducerConfig(properties)
.setRecordSerializer(
new DynamicKafkaRecordSerializationSchema(
topic,
partitioner,
keySerialization,
valueSerialization,
getFieldGetters(physicalChildren, keyProjection),
getFieldGetters(physicalChildren, valueProjection),
hasMetadata(),
getMetadataPositions(physicalChildren),
upsertMode))
.build();
if (flushMode.isEnabled() && upsertMode) {
return (DataStreamSinkProvider)
dataStream -> {
final boolean objectReuse =
dataStream
.getExecutionEnvironment()
.getConfig()
.isObjectReuseEnabled();
final ReducingUpsertSink<?> sink =
new ReducingUpsertSink<>(
kafkaSink,
physicalDataType,
keyProjection,
flushMode,
objectReuse
? createRowDataTypeSerializer(
context, dataStream.getExecutionConfig())
: null);
final DataStreamSink<RowData> end = dataStream.sinkTo(sink);
if (parallelism != null) {
end.setParallelism(parallelism);
}
return end;
};
}
return SinkProvider.of(kafkaSink, parallelism);
}
|
end.setParallelism(parallelism);
|
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
final SerializationSchema<RowData> keySerialization =
createSerialization(context, keyEncodingFormat, keyProjection, keyPrefix);
final SerializationSchema<RowData> valueSerialization =
createSerialization(context, valueEncodingFormat, valueProjection, null);
final KafkaSinkBuilder<RowData> sinkBuilder = KafkaSink.builder();
final List<LogicalType> physicalChildren = physicalDataType.getLogicalType().getChildren();
if (transactionalIdPrefix != null) {
sinkBuilder.setTransactionalIdPrefix(transactionalIdPrefix);
}
final KafkaSink<RowData> kafkaSink =
sinkBuilder
.setDeliverGuarantee(deliveryGuarantee)
.setBootstrapServers(
properties.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG).toString())
.setKafkaProducerConfig(properties)
.setRecordSerializer(
new DynamicKafkaRecordSerializationSchema(
topic,
partitioner,
keySerialization,
valueSerialization,
getFieldGetters(physicalChildren, keyProjection),
getFieldGetters(physicalChildren, valueProjection),
hasMetadata(),
getMetadataPositions(physicalChildren),
upsertMode))
.build();
if (flushMode.isEnabled() && upsertMode) {
return (DataStreamSinkProvider)
dataStream -> {
final boolean objectReuse =
dataStream
.getExecutionEnvironment()
.getConfig()
.isObjectReuseEnabled();
final ReducingUpsertSink<?> sink =
new ReducingUpsertSink<>(
kafkaSink,
physicalDataType,
keyProjection,
flushMode,
objectReuse
? createRowDataTypeSerializer(
context,
dataStream.getExecutionConfig())
::copy
: rowData -> rowData);
final DataStreamSink<RowData> end = dataStream.sinkTo(sink);
if (parallelism != null) {
end.setParallelism(parallelism);
}
return end;
};
}
return SinkProvider.of(kafkaSink, parallelism);
}
|
class KafkaDynamicSink implements DynamicTableSink, SupportsWritingMetadata {
/** Metadata that is appended at the end of a physical sink row. */
protected List<String> metadataKeys;
/** Data type of the consumed data. */
protected DataType consumedDataType;
/** Data type to configure the formats. */
protected final DataType physicalDataType;
/** Optional format for encoding keys to Kafka. */
protected final @Nullable EncodingFormat<SerializationSchema<RowData>> keyEncodingFormat;
/** Format for encoding values to Kafka. */
protected final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat;
/** Indices that determine the key fields and the source position in the consumed row. */
protected final int[] keyProjection;
/** Indices that determine the value fields and the source position in the consumed row. */
protected final int[] valueProjection;
/** Prefix that needs to be removed from fields when constructing the physical data type. */
protected final @Nullable String keyPrefix;
/** The defined delivery guarantee. */
private final DeliveryGuarantee deliveryGuarantee;
/**
* If the {@link #deliveryGuarantee} is {@link DeliveryGuarantee#EXACTLY_ONCE} the value is the
* prefix for all ids of opened Kafka transactions.
*/
@Nullable private final String transactionalIdPrefix;
/** The Kafka topic to write to. */
protected final String topic;
/** Properties for the Kafka producer. */
protected final Properties properties;
/** Partitioner to select Kafka partition for each item. */
protected final @Nullable FlinkKafkaPartitioner<RowData> partitioner;
/**
* Flag to determine sink mode. In upsert mode sink transforms the delete/update-before message
* to tombstone message.
*/
protected final boolean upsertMode;
/** Sink buffer flush config, which is only supported in upsert mode now. */
protected final SinkBufferFlushMode flushMode;
/** Parallelism of the physical Kafka producer. */
protected final @Nullable Integer parallelism;
public KafkaDynamicSink(
DataType consumedDataType,
DataType physicalDataType,
@Nullable EncodingFormat<SerializationSchema<RowData>> keyEncodingFormat,
EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat,
int[] keyProjection,
int[] valueProjection,
@Nullable String keyPrefix,
String topic,
Properties properties,
@Nullable FlinkKafkaPartitioner<RowData> partitioner,
DeliveryGuarantee deliveryGuarantee,
boolean upsertMode,
SinkBufferFlushMode flushMode,
@Nullable Integer parallelism,
@Nullable String transactionalIdPrefix) {
this.consumedDataType =
checkNotNull(consumedDataType, "Consumed data type must not be null.");
this.physicalDataType =
checkNotNull(physicalDataType, "Physical data type must not be null.");
this.keyEncodingFormat = keyEncodingFormat;
this.valueEncodingFormat =
checkNotNull(valueEncodingFormat, "Value encoding format must not be null.");
this.keyProjection = checkNotNull(keyProjection, "Key projection must not be null.");
this.valueProjection = checkNotNull(valueProjection, "Value projection must not be null.");
this.keyPrefix = keyPrefix;
this.transactionalIdPrefix = transactionalIdPrefix;
this.metadataKeys = Collections.emptyList();
this.topic = checkNotNull(topic, "Topic must not be null.");
this.properties = checkNotNull(properties, "Properties must not be null.");
this.partitioner = partitioner;
this.deliveryGuarantee =
checkNotNull(deliveryGuarantee, "DeliveryGuarantee must not be null.");
this.upsertMode = upsertMode;
this.flushMode = checkNotNull(flushMode);
if (flushMode.isEnabled() && !upsertMode) {
throw new IllegalArgumentException(
"Sink buffer flush is only supported in upsert-kafka.");
}
this.parallelism = parallelism;
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
return valueEncodingFormat.getChangelogMode();
}
@Override
public Map<String, DataType> listWritableMetadata() {
final Map<String, DataType> metadataMap = new LinkedHashMap<>();
Stream.of(WritableMetadata.values())
.forEachOrdered(m -> metadataMap.put(m.key, m.dataType));
return metadataMap;
}
@Override
public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
this.metadataKeys = metadataKeys;
this.consumedDataType = consumedDataType;
}
@Override
public DynamicTableSink copy() {
final KafkaDynamicSink copy =
new KafkaDynamicSink(
consumedDataType,
physicalDataType,
keyEncodingFormat,
valueEncodingFormat,
keyProjection,
valueProjection,
keyPrefix,
topic,
properties,
partitioner,
deliveryGuarantee,
upsertMode,
flushMode,
parallelism,
transactionalIdPrefix);
copy.metadataKeys = metadataKeys;
return copy;
}
@Override
public String asSummaryString() {
return "Kafka table sink";
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final KafkaDynamicSink that = (KafkaDynamicSink) o;
return Objects.equals(metadataKeys, that.metadataKeys)
&& Objects.equals(consumedDataType, that.consumedDataType)
&& Objects.equals(physicalDataType, that.physicalDataType)
&& Objects.equals(keyEncodingFormat, that.keyEncodingFormat)
&& Objects.equals(valueEncodingFormat, that.valueEncodingFormat)
&& Arrays.equals(keyProjection, that.keyProjection)
&& Arrays.equals(valueProjection, that.valueProjection)
&& Objects.equals(keyPrefix, that.keyPrefix)
&& Objects.equals(topic, that.topic)
&& Objects.equals(properties, that.properties)
&& Objects.equals(partitioner, that.partitioner)
&& Objects.equals(deliveryGuarantee, that.deliveryGuarantee)
&& Objects.equals(upsertMode, that.upsertMode)
&& Objects.equals(flushMode, that.flushMode)
&& Objects.equals(transactionalIdPrefix, that.transactionalIdPrefix)
&& Objects.equals(parallelism, that.parallelism);
}
@Override
public int hashCode() {
return Objects.hash(
metadataKeys,
consumedDataType,
physicalDataType,
keyEncodingFormat,
valueEncodingFormat,
keyProjection,
valueProjection,
keyPrefix,
topic,
properties,
partitioner,
deliveryGuarantee,
upsertMode,
flushMode,
transactionalIdPrefix,
parallelism);
}
private TypeSerializer<RowData> createRowDataTypeSerializer(
Context context, ExecutionConfig executionConfig) {
final TypeInformation<RowData> typeInformation =
context.createTypeInformation(consumedDataType);
return typeInformation.createSerializer(executionConfig);
}
private int[] getMetadataPositions(List<LogicalType> physicalChildren) {
return Stream.of(WritableMetadata.values())
.mapToInt(
m -> {
final int pos = metadataKeys.indexOf(m.key);
if (pos < 0) {
return -1;
}
return physicalChildren.size() + pos;
})
.toArray();
}
private boolean hasMetadata() {
return metadataKeys.size() > 0;
}
private RowData.FieldGetter[] getFieldGetters(
List<LogicalType> physicalChildren, int[] keyProjection) {
return Arrays.stream(keyProjection)
.mapToObj(
targetField ->
RowData.createFieldGetter(
physicalChildren.get(targetField), targetField))
.toArray(RowData.FieldGetter[]::new);
}
private @Nullable SerializationSchema<RowData> createSerialization(
DynamicTableSink.Context context,
@Nullable EncodingFormat<SerializationSchema<RowData>> format,
int[] projection,
@Nullable String prefix) {
if (format == null) {
return null;
}
DataType physicalFormatDataType =
DataTypeUtils.projectRow(this.physicalDataType, projection);
if (prefix != null) {
physicalFormatDataType = DataTypeUtils.stripRowPrefix(physicalFormatDataType, prefix);
}
return format.createRuntimeEncoder(context, physicalFormatDataType);
}
enum WritableMetadata {
HEADERS(
"headers",
DataTypes.MAP(DataTypes.STRING().nullable(), DataTypes.BYTES().nullable())
.nullable(),
new MetadataConverter() {
private static final long serialVersionUID = 1L;
@Override
public Object read(RowData row, int pos) {
if (row.isNullAt(pos)) {
return null;
}
final MapData map = row.getMap(pos);
final ArrayData keyArray = map.keyArray();
final ArrayData valueArray = map.valueArray();
final List<Header> headers = new ArrayList<>();
for (int i = 0; i < keyArray.size(); i++) {
if (!keyArray.isNullAt(i) && !valueArray.isNullAt(i)) {
final String key = keyArray.getString(i).toString();
final byte[] value = valueArray.getBinary(i);
headers.add(new KafkaHeader(key, value));
}
}
return headers;
}
}),
TIMESTAMP(
"timestamp",
DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).nullable(),
new MetadataConverter() {
private static final long serialVersionUID = 1L;
@Override
public Object read(RowData row, int pos) {
if (row.isNullAt(pos)) {
return null;
}
return row.getTimestamp(pos, 3).getMillisecond();
}
});
final String key;
final DataType dataType;
final MetadataConverter converter;
WritableMetadata(String key, DataType dataType, MetadataConverter converter) {
this.key = key;
this.dataType = dataType;
this.converter = converter;
}
}
interface MetadataConverter extends Serializable {
Object read(RowData consumedRow, int pos);
}
private static class KafkaHeader implements Header {
private final String key;
private final byte[] value;
KafkaHeader(String key, byte[] value) {
this.key = key;
this.value = value;
}
@Override
public String key() {
return key;
}
@Override
public byte[] value() {
return value;
}
}
}
|
class KafkaDynamicSink implements DynamicTableSink, SupportsWritingMetadata {
/** Metadata that is appended at the end of a physical sink row. */
protected List<String> metadataKeys;
/** Data type of the consumed data. */
protected DataType consumedDataType;
/** Data type to configure the formats. */
protected final DataType physicalDataType;
/** Optional format for encoding keys to Kafka. */
protected final @Nullable EncodingFormat<SerializationSchema<RowData>> keyEncodingFormat;
/** Format for encoding values to Kafka. */
protected final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat;
/** Indices that determine the key fields and the source position in the consumed row. */
protected final int[] keyProjection;
/** Indices that determine the value fields and the source position in the consumed row. */
protected final int[] valueProjection;
/** Prefix that needs to be removed from fields when constructing the physical data type. */
protected final @Nullable String keyPrefix;
/** The defined delivery guarantee. */
private final DeliveryGuarantee deliveryGuarantee;
/**
* If the {@link #deliveryGuarantee} is {@link DeliveryGuarantee#EXACTLY_ONCE} the value is the
* prefix for all ids of opened Kafka transactions.
*/
@Nullable private final String transactionalIdPrefix;
/** The Kafka topic to write to. */
protected final String topic;
/** Properties for the Kafka producer. */
protected final Properties properties;
/** Partitioner to select Kafka partition for each item. */
protected final @Nullable FlinkKafkaPartitioner<RowData> partitioner;
/**
* Flag to determine sink mode. In upsert mode sink transforms the delete/update-before message
* to tombstone message.
*/
protected final boolean upsertMode;
/** Sink buffer flush config, which is only supported in upsert mode now. */
protected final SinkBufferFlushMode flushMode;
/** Parallelism of the physical Kafka producer. */
protected final @Nullable Integer parallelism;
public KafkaDynamicSink(
DataType consumedDataType,
DataType physicalDataType,
@Nullable EncodingFormat<SerializationSchema<RowData>> keyEncodingFormat,
EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat,
int[] keyProjection,
int[] valueProjection,
@Nullable String keyPrefix,
String topic,
Properties properties,
@Nullable FlinkKafkaPartitioner<RowData> partitioner,
DeliveryGuarantee deliveryGuarantee,
boolean upsertMode,
SinkBufferFlushMode flushMode,
@Nullable Integer parallelism,
@Nullable String transactionalIdPrefix) {
this.consumedDataType =
checkNotNull(consumedDataType, "Consumed data type must not be null.");
this.physicalDataType =
checkNotNull(physicalDataType, "Physical data type must not be null.");
this.keyEncodingFormat = keyEncodingFormat;
this.valueEncodingFormat =
checkNotNull(valueEncodingFormat, "Value encoding format must not be null.");
this.keyProjection = checkNotNull(keyProjection, "Key projection must not be null.");
this.valueProjection = checkNotNull(valueProjection, "Value projection must not be null.");
this.keyPrefix = keyPrefix;
this.transactionalIdPrefix = transactionalIdPrefix;
this.metadataKeys = Collections.emptyList();
this.topic = checkNotNull(topic, "Topic must not be null.");
this.properties = checkNotNull(properties, "Properties must not be null.");
this.partitioner = partitioner;
this.deliveryGuarantee =
checkNotNull(deliveryGuarantee, "DeliveryGuarantee must not be null.");
this.upsertMode = upsertMode;
this.flushMode = checkNotNull(flushMode);
if (flushMode.isEnabled() && !upsertMode) {
throw new IllegalArgumentException(
"Sink buffer flush is only supported in upsert-kafka.");
}
this.parallelism = parallelism;
}
@Override
public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
return valueEncodingFormat.getChangelogMode();
}
@Override
public Map<String, DataType> listWritableMetadata() {
final Map<String, DataType> metadataMap = new LinkedHashMap<>();
Stream.of(WritableMetadata.values())
.forEachOrdered(m -> metadataMap.put(m.key, m.dataType));
return metadataMap;
}
@Override
public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
this.metadataKeys = metadataKeys;
this.consumedDataType = consumedDataType;
}
@Override
public DynamicTableSink copy() {
final KafkaDynamicSink copy =
new KafkaDynamicSink(
consumedDataType,
physicalDataType,
keyEncodingFormat,
valueEncodingFormat,
keyProjection,
valueProjection,
keyPrefix,
topic,
properties,
partitioner,
deliveryGuarantee,
upsertMode,
flushMode,
parallelism,
transactionalIdPrefix);
copy.metadataKeys = metadataKeys;
return copy;
}
@Override
public String asSummaryString() {
return "Kafka table sink";
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final KafkaDynamicSink that = (KafkaDynamicSink) o;
return Objects.equals(metadataKeys, that.metadataKeys)
&& Objects.equals(consumedDataType, that.consumedDataType)
&& Objects.equals(physicalDataType, that.physicalDataType)
&& Objects.equals(keyEncodingFormat, that.keyEncodingFormat)
&& Objects.equals(valueEncodingFormat, that.valueEncodingFormat)
&& Arrays.equals(keyProjection, that.keyProjection)
&& Arrays.equals(valueProjection, that.valueProjection)
&& Objects.equals(keyPrefix, that.keyPrefix)
&& Objects.equals(topic, that.topic)
&& Objects.equals(properties, that.properties)
&& Objects.equals(partitioner, that.partitioner)
&& Objects.equals(deliveryGuarantee, that.deliveryGuarantee)
&& Objects.equals(upsertMode, that.upsertMode)
&& Objects.equals(flushMode, that.flushMode)
&& Objects.equals(transactionalIdPrefix, that.transactionalIdPrefix)
&& Objects.equals(parallelism, that.parallelism);
}
@Override
public int hashCode() {
return Objects.hash(
metadataKeys,
consumedDataType,
physicalDataType,
keyEncodingFormat,
valueEncodingFormat,
keyProjection,
valueProjection,
keyPrefix,
topic,
properties,
partitioner,
deliveryGuarantee,
upsertMode,
flushMode,
transactionalIdPrefix,
parallelism);
}
private TypeSerializer<RowData> createRowDataTypeSerializer(
Context context, ExecutionConfig executionConfig) {
final TypeInformation<RowData> typeInformation =
context.createTypeInformation(consumedDataType);
return typeInformation.createSerializer(executionConfig);
}
private int[] getMetadataPositions(List<LogicalType> physicalChildren) {
return Stream.of(WritableMetadata.values())
.mapToInt(
m -> {
final int pos = metadataKeys.indexOf(m.key);
if (pos < 0) {
return -1;
}
return physicalChildren.size() + pos;
})
.toArray();
}
private boolean hasMetadata() {
return metadataKeys.size() > 0;
}
private RowData.FieldGetter[] getFieldGetters(
List<LogicalType> physicalChildren, int[] keyProjection) {
return Arrays.stream(keyProjection)
.mapToObj(
targetField ->
RowData.createFieldGetter(
physicalChildren.get(targetField), targetField))
.toArray(RowData.FieldGetter[]::new);
}
private @Nullable SerializationSchema<RowData> createSerialization(
DynamicTableSink.Context context,
@Nullable EncodingFormat<SerializationSchema<RowData>> format,
int[] projection,
@Nullable String prefix) {
if (format == null) {
return null;
}
DataType physicalFormatDataType =
DataTypeUtils.projectRow(this.physicalDataType, projection);
if (prefix != null) {
physicalFormatDataType = DataTypeUtils.stripRowPrefix(physicalFormatDataType, prefix);
}
return format.createRuntimeEncoder(context, physicalFormatDataType);
}
enum WritableMetadata {
HEADERS(
"headers",
DataTypes.MAP(DataTypes.STRING().nullable(), DataTypes.BYTES().nullable())
.nullable(),
new MetadataConverter() {
private static final long serialVersionUID = 1L;
@Override
public Object read(RowData row, int pos) {
if (row.isNullAt(pos)) {
return null;
}
final MapData map = row.getMap(pos);
final ArrayData keyArray = map.keyArray();
final ArrayData valueArray = map.valueArray();
final List<Header> headers = new ArrayList<>();
for (int i = 0; i < keyArray.size(); i++) {
if (!keyArray.isNullAt(i) && !valueArray.isNullAt(i)) {
final String key = keyArray.getString(i).toString();
final byte[] value = valueArray.getBinary(i);
headers.add(new KafkaHeader(key, value));
}
}
return headers;
}
}),
TIMESTAMP(
"timestamp",
DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3).nullable(),
new MetadataConverter() {
private static final long serialVersionUID = 1L;
@Override
public Object read(RowData row, int pos) {
if (row.isNullAt(pos)) {
return null;
}
return row.getTimestamp(pos, 3).getMillisecond();
}
});
final String key;
final DataType dataType;
final MetadataConverter converter;
WritableMetadata(String key, DataType dataType, MetadataConverter converter) {
this.key = key;
this.dataType = dataType;
this.converter = converter;
}
}
interface MetadataConverter extends Serializable {
Object read(RowData consumedRow, int pos);
}
private static class KafkaHeader implements Header {
private final String key;
private final byte[] value;
KafkaHeader(String key, byte[] value) {
this.key = key;
this.value = value;
}
@Override
public String key() {
return key;
}
@Override
public byte[] value() {
return value;
}
}
}
|
This implies that we have to buffer elements until the watermark passes their timestamp + allowed lateness.
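For illustration, a minimal stand-alone sketch of that invariant (plain long millisecond timestamps instead of Beam's Instant; the names are illustrative, not the runner's API): an element may only be dropped once the watermark has moved past its timestamp plus the allowed lateness, so until then it must stay in the sort buffer.
final class LatenessSketch {
    // True once the watermark has passed timestamp + allowed lateness,
    // i.e. the element no longer needs to be buffered and may be dropped.
    static boolean canDrop(long watermarkMillis, long elementTimestampMillis, long allowedLatenessMillis) {
        return watermarkMillis > elementTimestampMillis + allowedLatenessMillis;
    }

    public static void main(String[] args) {
        long allowedLateness = 1_000L;
        System.out.println(canDrop(5_000L, 4_500L, allowedLateness)); // false: keep buffered
        System.out.println(canDrop(6_000L, 4_500L, allowedLateness)); // true: may drop
    }
}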
|
public void processElement(WindowedValue<InputT> input) {
for (WindowedValue<InputT> value : input.explodeWindows()) {
BoundedWindow window = value.getWindows().iterator().next();
if (isLate(window)) {
reportDroppedElement(value, window);
} else if (requiresTimeSortedInput) {
processElementOrdered(window, value);
} else {
processElementUnordered(window, value);
}
}
}
|
} else {
|
public void processElement(WindowedValue<InputT> input) {
for (WindowedValue<InputT> value : input.explodeWindows()) {
BoundedWindow window = value.getWindows().iterator().next();
if (isLate(window)) {
reportDroppedElement(value, window);
} else if (requiresTimeSortedInput) {
processElementOrdered(window, value);
} else {
processElementUnordered(window, value);
}
}
}
|
class StatefulDoFnRunner<InputT, OutputT, W extends BoundedWindow>
implements DoFnRunner<InputT, OutputT> {
public static final String DROPPED_DUE_TO_LATENESS_COUNTER = "StatefulParDoDropped";
private static final String SORT_BUFFER_STATE = "sortBuffer";
private static final String SORT_BUFFER_MIN_STAMP = "sortBufferMinStamp";
private static final String SORT_FLUSH_TIMER = "__StatefulParDoSortFlushTimerId";
private static final String SORT_FLUSH_WATERMARK_HOLD = "flushWatermarkHold";
private final DoFnRunner<InputT, OutputT> doFnRunner;
private final StepContext stepContext;
private final WindowingStrategy<?, ?> windowingStrategy;
private final Counter droppedDueToLateness =
Metrics.counter(StatefulDoFnRunner.class, DROPPED_DUE_TO_LATENESS_COUNTER);
private final CleanupTimer<InputT> cleanupTimer;
private final StateCleaner stateCleaner;
private final boolean requiresTimeSortedInput;
private final Coder<BoundedWindow> windowCoder;
private final StateTag<BagState<WindowedValue<InputT>>> sortBufferTag;
private final StateTag<ValueState<Instant>> sortBufferMinStampTag =
StateTags.makeSystemTagInternal(StateTags.value(SORT_BUFFER_MIN_STAMP, InstantCoder.of()));
private final StateTag<WatermarkHoldState> watermarkHold =
StateTags.watermarkStateInternal(SORT_FLUSH_WATERMARK_HOLD, TimestampCombiner.LATEST);
public StatefulDoFnRunner(
DoFnRunner<InputT, OutputT> doFnRunner,
Coder<InputT> inputCoder,
StepContext stepContext,
WindowingStrategy<?, ?> windowingStrategy,
CleanupTimer<InputT> cleanupTimer,
StateCleaner<W> stateCleaner,
boolean requiresTimeSortedInput) {
this.doFnRunner = doFnRunner;
this.stepContext = stepContext;
this.windowingStrategy = windowingStrategy;
this.cleanupTimer = cleanupTimer;
this.stateCleaner = stateCleaner;
this.requiresTimeSortedInput = requiresTimeSortedInput;
WindowFn<?, ?> windowFn = windowingStrategy.getWindowFn();
@SuppressWarnings("unchecked")
Coder<BoundedWindow> untypedCoder = (Coder<BoundedWindow>) windowFn.windowCoder();
this.windowCoder = untypedCoder;
this.sortBufferTag =
StateTags.makeSystemTagInternal(
StateTags.bag(SORT_BUFFER_STATE, WindowedValue.getFullCoder(inputCoder, windowCoder)));
rejectMergingWindowFn(windowFn);
}
private void rejectMergingWindowFn(WindowFn<?, ?> windowFn) {
if (!(windowFn instanceof NonMergingWindowFn)) {
throw new UnsupportedOperationException(
"MergingWindowFn is not supported for stateful DoFns, WindowFn is: " + windowFn);
}
}
public List<StateTag<?>> getSystemStateTags() {
return Arrays.asList(sortBufferTag, sortBufferMinStampTag, watermarkHold);
}
@Override
public DoFn<InputT, OutputT> getFn() {
return doFnRunner.getFn();
}
@Override
public void startBundle() {
doFnRunner.startBundle();
}
@Override
public void finishBundle() {
doFnRunner.finishBundle();
}
private void processElementUnordered(BoundedWindow window, WindowedValue<InputT> value) {
cleanupTimer.setForWindow(value.getValue(), window);
doFnRunner.processElement(value);
}
private void processElementOrdered(BoundedWindow window, WindowedValue<InputT> value) {
StateInternals stateInternals = stepContext.stateInternals();
TimerInternals timerInternals = stepContext.timerInternals();
if (!timerInternals.currentInputWatermarkTime().isAfter(value.getTimestamp())) {
StateNamespace namespace = StateNamespaces.window(windowCoder, window);
BagState<WindowedValue<InputT>> sortBuffer = stateInternals.state(namespace, sortBufferTag);
ValueState<Instant> minStampState = stateInternals.state(namespace, sortBufferMinStampTag);
sortBuffer.add(value);
Instant minStamp =
MoreObjects.firstNonNull(minStampState.read(), BoundedWindow.TIMESTAMP_MAX_VALUE);
if (value.getTimestamp().isBefore(minStamp)) {
minStamp = value.getTimestamp();
minStampState.write(minStamp);
setupFlushTimerAndWatermarkHold(namespace, minStamp);
}
} else {
reportDroppedElement(value, window);
}
}
private boolean isLate(BoundedWindow window) {
Instant gcTime = LateDataUtils.garbageCollectionTime(window, windowingStrategy);
Instant inputWM = stepContext.timerInternals().currentInputWatermarkTime();
return gcTime.isBefore(inputWM);
}
private void reportDroppedElement(WindowedValue<InputT> value, BoundedWindow window) {
droppedDueToLateness.inc();
WindowTracing.debug(
"StatefulDoFnRunner.processElement: Dropping element at {}; window:{} "
+ "since too far behind inputWatermark:{}",
value.getTimestamp(),
window,
stepContext.timerInternals().currentInputWatermarkTime());
}
@Override
public void onTimer(
String timerId, BoundedWindow window, Instant timestamp, TimeDomain timeDomain) {
if (timerId.equals(SORT_FLUSH_TIMER)) {
onSortFlushTimer(window, stepContext.timerInternals().currentInputWatermarkTime());
} else if (cleanupTimer.isForWindow(timerId, window, timestamp, timeDomain)) {
stateCleaner.clearForWindow(window);
} else {
if (!timeDomain.equals(TimeDomain.EVENT_TIME) && isLate(window)) {
WindowTracing.debug(
"StatefulDoFnRunner.onTimer: Ignoring processing-time timer at {}; window:{} "
+ "since window is too far behind inputWatermark:{}",
timestamp,
window,
stepContext.timerInternals().currentInputWatermarkTime());
} else {
doFnRunner.onTimer(timerId, window, timestamp, timeDomain);
}
}
}
private void onSortFlushTimer(BoundedWindow window, Instant timestamp) {
StateInternals stateInternals = stepContext.stateInternals();
StateNamespace namespace = StateNamespaces.window(windowCoder, window);
BagState<WindowedValue<InputT>> sortBuffer = stateInternals.state(namespace, sortBufferTag);
ValueState<Instant> minStampState = stateInternals.state(namespace, sortBufferMinStampTag);
List<WindowedValue<InputT>> keep = new ArrayList<>();
List<WindowedValue<InputT>> flush = new ArrayList<>();
Instant newMinStamp = BoundedWindow.TIMESTAMP_MAX_VALUE;
for (WindowedValue<InputT> e : sortBuffer.read()) {
if (!e.getTimestamp().isAfter(timestamp)) {
flush.add(e);
} else {
keep.add(e);
if (e.getTimestamp().isBefore(newMinStamp)) {
newMinStamp = e.getTimestamp();
}
}
}
flush.stream()
.sorted((a, b) -> a.getTimestamp().compareTo(b.getTimestamp()))
.forEachOrdered(e -> processElementUnordered(window, e));
sortBuffer.clear();
keep.forEach(sortBuffer::add);
minStampState.write(newMinStamp);
if (newMinStamp.isBefore(BoundedWindow.TIMESTAMP_MAX_VALUE)) {
setupFlushTimerAndWatermarkHold(namespace, newMinStamp);
} else {
clearWatermarkHold(namespace);
}
}
private void setupFlushTimerAndWatermarkHold(StateNamespace namespace, Instant flush) {
WatermarkHoldState watermark = stepContext.stateInternals().state(namespace, watermarkHold);
stepContext
.timerInternals()
.setTimer(namespace, SORT_FLUSH_TIMER, flush, TimeDomain.EVENT_TIME);
watermark.clear();
watermark.add(flush);
}
private void clearWatermarkHold(StateNamespace namespace) {
stepContext.stateInternals().state(namespace, watermarkHold).clear();
}
/**
* A cleaner for deciding when to clean state of window.
*
* <p>A runner might either (a) already know that it always has a timer set for the expiration
* time or (b) not need a timer at all because it is a batch runner that discards state when it is
* done.
*/
public interface CleanupTimer<InputT> {
/** Set the garbage collect time of the window to timer. */
void setForWindow(InputT value, BoundedWindow window);
/** Checks whether the given timer is a cleanup timer for the window. */
boolean isForWindow(
String timerId, BoundedWindow window, Instant timestamp, TimeDomain timeDomain);
}
/** A cleaner to clean all states of the window. */
public interface StateCleaner<W extends BoundedWindow> {
void clearForWindow(W window);
}
/** A {@link StatefulDoFnRunner.CleanupTimer} implemented via {@link TimerInternals}. */
public static class TimeInternalsCleanupTimer<InputT>
implements StatefulDoFnRunner.CleanupTimer<InputT> {
public static final String GC_TIMER_ID = "__StatefulParDoGcTimerId";
/**
* The amount of milliseconds by which to delay cleanup. We use this to ensure that state is
* still available when a user timer for {@code window.maxTimestamp()} fires.
*/
public static final long GC_DELAY_MS = 1;
private final TimerInternals timerInternals;
private final WindowingStrategy<?, ?> windowingStrategy;
private final Coder<BoundedWindow> windowCoder;
public TimeInternalsCleanupTimer(
TimerInternals timerInternals, WindowingStrategy<?, ?> windowingStrategy) {
this.windowingStrategy = windowingStrategy;
WindowFn<?, ?> windowFn = windowingStrategy.getWindowFn();
windowCoder = (Coder<BoundedWindow>) windowFn.windowCoder();
this.timerInternals = timerInternals;
}
@Override
public void setForWindow(InputT input, BoundedWindow window) {
Instant gcTime = LateDataUtils.garbageCollectionTime(window, windowingStrategy);
gcTime = gcTime.plus(GC_DELAY_MS);
timerInternals.setTimer(
StateNamespaces.window(windowCoder, window), GC_TIMER_ID, gcTime, TimeDomain.EVENT_TIME);
}
@Override
public boolean isForWindow(
String timerId, BoundedWindow window, Instant timestamp, TimeDomain timeDomain) {
boolean isEventTimer = timeDomain.equals(TimeDomain.EVENT_TIME);
Instant gcTime = LateDataUtils.garbageCollectionTime(window, windowingStrategy);
gcTime = gcTime.plus(GC_DELAY_MS);
return isEventTimer && GC_TIMER_ID.equals(timerId) && gcTime.equals(timestamp);
}
}
/** A {@link StatefulDoFnRunner.StateCleaner} implemented via {@link StateInternals}. */
public static class StateInternalsStateCleaner<W extends BoundedWindow>
implements StatefulDoFnRunner.StateCleaner<W> {
private final DoFn<?, ?> fn;
private final DoFnSignature signature;
private final StateInternals stateInternals;
private final Coder<W> windowCoder;
public StateInternalsStateCleaner(
DoFn<?, ?> fn, StateInternals stateInternals, Coder<W> windowCoder) {
this.fn = fn;
this.signature = DoFnSignatures.getSignature(fn.getClass());
this.stateInternals = stateInternals;
this.windowCoder = windowCoder;
}
@Override
public void clearForWindow(W window) {
for (Map.Entry<String, DoFnSignature.StateDeclaration> entry :
signature.stateDeclarations().entrySet()) {
try {
StateSpec<?> spec = (StateSpec<?>) entry.getValue().field().get(fn);
State state =
stateInternals.state(
StateNamespaces.window(windowCoder, window),
StateTags.tagForSpec(entry.getKey(), (StateSpec) spec));
state.clear();
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
}
}
|
class StatefulDoFnRunner<InputT, OutputT, W extends BoundedWindow>
implements DoFnRunner<InputT, OutputT> {
public static final String DROPPED_DUE_TO_LATENESS_COUNTER = "StatefulParDoDropped";
private static final String SORT_BUFFER_STATE = "sortBuffer";
private static final String SORT_BUFFER_MIN_STAMP = "sortBufferMinStamp";
private static final String SORT_FLUSH_TIMER = "__StatefulParDoSortFlushTimerId";
private static final String SORT_FLUSH_WATERMARK_HOLD = "flushWatermarkHold";
private final DoFnRunner<InputT, OutputT> doFnRunner;
private final StepContext stepContext;
private final WindowingStrategy<?, ?> windowingStrategy;
private final Counter droppedDueToLateness =
Metrics.counter(StatefulDoFnRunner.class, DROPPED_DUE_TO_LATENESS_COUNTER);
private final CleanupTimer<InputT> cleanupTimer;
private final StateCleaner stateCleaner;
private final boolean requiresTimeSortedInput;
private final Coder<BoundedWindow> windowCoder;
private final StateTag<BagState<WindowedValue<InputT>>> sortBufferTag;
private final StateTag<ValueState<Instant>> sortBufferMinStampTag =
StateTags.makeSystemTagInternal(StateTags.value(SORT_BUFFER_MIN_STAMP, InstantCoder.of()));
private final StateTag<WatermarkHoldState> watermarkHold =
StateTags.watermarkStateInternal(SORT_FLUSH_WATERMARK_HOLD, TimestampCombiner.LATEST);
public StatefulDoFnRunner(
DoFnRunner<InputT, OutputT> doFnRunner,
Coder<InputT> inputCoder,
StepContext stepContext,
WindowingStrategy<?, ?> windowingStrategy,
CleanupTimer<InputT> cleanupTimer,
StateCleaner<W> stateCleaner,
boolean requiresTimeSortedInput) {
this.doFnRunner = doFnRunner;
this.stepContext = stepContext;
this.windowingStrategy = windowingStrategy;
this.cleanupTimer = cleanupTimer;
this.stateCleaner = stateCleaner;
this.requiresTimeSortedInput = requiresTimeSortedInput;
WindowFn<?, ?> windowFn = windowingStrategy.getWindowFn();
@SuppressWarnings("unchecked")
Coder<BoundedWindow> untypedCoder = (Coder<BoundedWindow>) windowFn.windowCoder();
this.windowCoder = untypedCoder;
this.sortBufferTag =
StateTags.makeSystemTagInternal(
StateTags.bag(SORT_BUFFER_STATE, WindowedValue.getFullCoder(inputCoder, windowCoder)));
rejectMergingWindowFn(windowFn);
}
private void rejectMergingWindowFn(WindowFn<?, ?> windowFn) {
if (!(windowFn instanceof NonMergingWindowFn)) {
throw new UnsupportedOperationException(
"MergingWindowFn is not supported for stateful DoFns, WindowFn is: " + windowFn);
}
}
public List<StateTag<?>> getSystemStateTags() {
return Arrays.asList(sortBufferTag, sortBufferMinStampTag, watermarkHold);
}
@Override
public DoFn<InputT, OutputT> getFn() {
return doFnRunner.getFn();
}
@Override
public void startBundle() {
doFnRunner.startBundle();
}
@Override
public void finishBundle() {
doFnRunner.finishBundle();
}
private void processElementUnordered(BoundedWindow window, WindowedValue<InputT> value) {
cleanupTimer.setForWindow(value.getValue(), window);
doFnRunner.processElement(value);
}
private void processElementOrdered(BoundedWindow window, WindowedValue<InputT> value) {
StateInternals stateInternals = stepContext.stateInternals();
TimerInternals timerInternals = stepContext.timerInternals();
Instant outputWatermark =
MoreObjects.firstNonNull(
timerInternals.currentOutputWatermarkTime(), BoundedWindow.TIMESTAMP_MIN_VALUE);
if (!outputWatermark.isAfter(
value.getTimestamp().plus(windowingStrategy.getAllowedLateness()))) {
StateNamespace namespace = StateNamespaces.window(windowCoder, window);
BagState<WindowedValue<InputT>> sortBuffer = stateInternals.state(namespace, sortBufferTag);
ValueState<Instant> minStampState = stateInternals.state(namespace, sortBufferMinStampTag);
sortBuffer.add(value);
Instant minStamp =
MoreObjects.firstNonNull(minStampState.read(), BoundedWindow.TIMESTAMP_MAX_VALUE);
if (value.getTimestamp().isBefore(minStamp)) {
minStamp = value.getTimestamp();
minStampState.write(minStamp);
setupFlushTimerAndWatermarkHold(namespace, minStamp);
}
} else {
reportDroppedElement(value, window);
}
}
private boolean isLate(BoundedWindow window) {
Instant gcTime = LateDataUtils.garbageCollectionTime(window, windowingStrategy);
Instant inputWM = stepContext.timerInternals().currentInputWatermarkTime();
return gcTime.isBefore(inputWM);
}
private void reportDroppedElement(WindowedValue<InputT> value, BoundedWindow window) {
droppedDueToLateness.inc();
WindowTracing.debug(
"StatefulDoFnRunner.processElement: Dropping element at {}; window:{} "
+ "since too far behind inputWatermark:{}",
value.getTimestamp(),
window,
stepContext.timerInternals().currentInputWatermarkTime());
}
@Override
public void onTimer(
String timerId,
String timerFamilyId,
BoundedWindow window,
Instant timestamp,
Instant outputTimestamp,
TimeDomain timeDomain) {
if (timerId.equals(SORT_FLUSH_TIMER)) {
onSortFlushTimer(window, stepContext.timerInternals().currentInputWatermarkTime());
} else if (cleanupTimer.isForWindow(timerId, window, timestamp, timeDomain)) {
stateCleaner.clearForWindow(window);
} else {
if (!timeDomain.equals(TimeDomain.EVENT_TIME) && isLate(window)) {
WindowTracing.debug(
"StatefulDoFnRunner.onTimer: Ignoring processing-time timer at {}; window:{} "
+ "since window is too far behind inputWatermark:{}",
timestamp,
window,
stepContext.timerInternals().currentInputWatermarkTime());
} else {
doFnRunner.onTimer(timerId, timerFamilyId, window, timestamp, outputTimestamp, timeDomain);
}
}
}
private void onSortFlushTimer(BoundedWindow window, Instant timestamp) {
StateInternals stateInternals = stepContext.stateInternals();
StateNamespace namespace = StateNamespaces.window(windowCoder, window);
BagState<WindowedValue<InputT>> sortBuffer = stateInternals.state(namespace, sortBufferTag);
ValueState<Instant> minStampState = stateInternals.state(namespace, sortBufferMinStampTag);
List<WindowedValue<InputT>> keep = new ArrayList<>();
List<WindowedValue<InputT>> flush = new ArrayList<>();
Instant newMinStamp = BoundedWindow.TIMESTAMP_MAX_VALUE;
for (WindowedValue<InputT> e : sortBuffer.read()) {
if (!e.getTimestamp().isAfter(timestamp)) {
flush.add(e);
} else {
keep.add(e);
if (e.getTimestamp().isBefore(newMinStamp)) {
newMinStamp = e.getTimestamp();
}
}
}
flush.stream()
.sorted(Comparator.comparing(WindowedValue::getTimestamp))
.forEachOrdered(e -> processElementUnordered(window, e));
sortBuffer.clear();
keep.forEach(sortBuffer::add);
minStampState.write(newMinStamp);
if (newMinStamp.isBefore(BoundedWindow.TIMESTAMP_MAX_VALUE)) {
setupFlushTimerAndWatermarkHold(namespace, newMinStamp);
} else {
clearWatermarkHold(namespace);
}
}
private void setupFlushTimerAndWatermarkHold(StateNamespace namespace, Instant flush) {
WatermarkHoldState watermark = stepContext.stateInternals().state(namespace, watermarkHold);
stepContext
.timerInternals()
.setTimer(
namespace, SORT_FLUSH_TIMER, SORT_FLUSH_TIMER, flush, flush, TimeDomain.EVENT_TIME);
watermark.clear();
watermark.add(flush);
}
private void clearWatermarkHold(StateNamespace namespace) {
stepContext.stateInternals().state(namespace, watermarkHold).clear();
}
/**
* A cleaner for deciding when to clean state of window.
*
* <p>A runner might either (a) already know that it always has a timer set for the expiration
* time or (b) not need a timer at all because it is a batch runner that discards state when it is
* done.
*/
public interface CleanupTimer<InputT> {
/** Set the garbage collect time of the window to timer. */
void setForWindow(InputT value, BoundedWindow window);
/** Checks whether the given timer is a cleanup timer for the window. */
boolean isForWindow(
String timerId, BoundedWindow window, Instant timestamp, TimeDomain timeDomain);
}
/** A cleaner to clean all states of the window. */
public interface StateCleaner<W extends BoundedWindow> {
void clearForWindow(W window);
}
/** A {@link StatefulDoFnRunner.CleanupTimer} implemented via {@link TimerInternals}. */
public static class TimeInternalsCleanupTimer<InputT>
implements StatefulDoFnRunner.CleanupTimer<InputT> {
public static final String GC_TIMER_ID = "__StatefulParDoGcTimerId";
/**
* The amount of milliseconds by which to delay cleanup. We use this to ensure that state is
* still available when a user timer for {@code window.maxTimestamp()} fires.
*/
public static final long GC_DELAY_MS = 1;
private final TimerInternals timerInternals;
private final WindowingStrategy<?, ?> windowingStrategy;
private final Coder<BoundedWindow> windowCoder;
public TimeInternalsCleanupTimer(
TimerInternals timerInternals, WindowingStrategy<?, ?> windowingStrategy) {
this.windowingStrategy = windowingStrategy;
WindowFn<?, ?> windowFn = windowingStrategy.getWindowFn();
windowCoder = (Coder<BoundedWindow>) windowFn.windowCoder();
this.timerInternals = timerInternals;
}
@Override
public void setForWindow(InputT input, BoundedWindow window) {
Instant gcTime = LateDataUtils.garbageCollectionTime(window, windowingStrategy);
gcTime = gcTime.plus(GC_DELAY_MS);
timerInternals.setTimer(
StateNamespaces.window(windowCoder, window),
GC_TIMER_ID,
"",
gcTime,
window.maxTimestamp(),
TimeDomain.EVENT_TIME);
}
@Override
public boolean isForWindow(
String timerId, BoundedWindow window, Instant timestamp, TimeDomain timeDomain) {
boolean isEventTimer = timeDomain.equals(TimeDomain.EVENT_TIME);
Instant gcTime = LateDataUtils.garbageCollectionTime(window, windowingStrategy);
gcTime = gcTime.plus(GC_DELAY_MS);
return isEventTimer && GC_TIMER_ID.equals(timerId) && gcTime.equals(timestamp);
}
}
/** A {@link StatefulDoFnRunner.StateCleaner} implemented via {@link StateInternals}. */
public static class StateInternalsStateCleaner<W extends BoundedWindow>
implements StatefulDoFnRunner.StateCleaner<W> {
private final DoFn<?, ?> fn;
private final DoFnSignature signature;
private final StateInternals stateInternals;
private final Coder<W> windowCoder;
public StateInternalsStateCleaner(
DoFn<?, ?> fn, StateInternals stateInternals, Coder<W> windowCoder) {
this.fn = fn;
this.signature = DoFnSignatures.getSignature(fn.getClass());
this.stateInternals = stateInternals;
this.windowCoder = windowCoder;
}
@Override
public void clearForWindow(W window) {
for (Map.Entry<String, DoFnSignature.StateDeclaration> entry :
signature.stateDeclarations().entrySet()) {
try {
StateSpec<?> spec = (StateSpec<?>) entry.getValue().field().get(fn);
State state =
stateInternals.state(
StateNamespaces.window(windowCoder, window),
StateTags.tagForSpec(entry.getKey(), (StateSpec) spec));
state.clear();
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
}
}
|
`jacksonFactory.toString` is declared to throw IOException.
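A minimal stand-alone sketch of the resulting pattern (toJson below is a hypothetical stand-in for jacksonFactory.toString; only the checked-exception handling is the point):
import java.io.IOException;

final class JsonTextSketch {
    // Stand-in for jacksonFactory.toString(Object), which declares IOException.
    static String toJson(Object value) throws IOException {
        if (value == null) {
            throw new IOException("serialization failed");
        }
        return "{\"value\":\"" + value + "\"}";
    }

    // Wraps the checked IOException in an unchecked one, as the assertion helper does.
    static String toJsonOrThrow(Object value) {
        try {
            return toJson(value);
        } catch (IOException ex) {
            throw new IllegalArgumentException("Could not generate JSON text", ex);
        }
    }

    public static void main(String[] args) {
        System.out.println(toJsonOrThrow("ok"));
    }
}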
|
public static void assertEqualsAsJson(Object expected, Object actual) {
try {
String expectedJsonText =
expected instanceof String ? (String) expected : jacksonFactory.toString(expected);
String actualJsonText = jacksonFactory.toString(actual);
JSONAssert.assertEquals(expectedJsonText, actualJsonText, true);
} catch (JSONException ex) {
throw new IllegalArgumentException("Could not parse JSON", ex);
} catch (IOException ex) {
throw new IllegalArgumentException("Could not generate JSON text", ex);
}
}
|
} catch (IOException ex) {
|
public static void assertEqualsAsJson(Object expected, Object actual) {
try {
String expectedJsonText =
expected instanceof String ? (String) expected : jacksonFactory.toString(expected);
String actualJsonText = jacksonFactory.toString(actual);
JSONAssert.assertEquals(expectedJsonText, actualJsonText, true);
} catch (JSONException ex) {
throw new IllegalArgumentException("Could not parse JSON", ex);
} catch (IOException ex) {
throw new IllegalArgumentException("Could not generate JSON text", ex);
}
}
|
class GenericJsonAssert {
private static final JacksonFactory jacksonFactory = JacksonFactory.getDefaultInstance();
/**
* Asserts that {@code actual} has the same JSON representation as {@code expected}.
*
* @param expected expected JSON string, {@link GenericJson}, {@link java.util.Map}, or {@link
* Iterable} of {@link GenericJson}.
* @param actual actual object to compare its JSON representation.
*/
}
|
class GenericJsonAssert {
private static final JacksonFactory jacksonFactory = JacksonFactory.getDefaultInstance();
/**
* Asserts that {@code actual} has the same JSON representation as {@code expected}.
*
* @param expected expected JSON string, {@link GenericJson}, {@link java.util.Map}, or {@link
* Iterable} of {@link GenericJson}.
* @param actual actual object to compare its JSON representation.
*/
}
|
Use try (CloseableRegistry closeableRegistry = new CloseableRegistry()) to avoid the finally clause.
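For reference, a minimal stand-in for Flink's CloseableRegistry showing why try-with-resources removes the need for an explicit finally clause (SimpleCloseableRegistry is a sketch, not the real class):
import java.util.ArrayDeque;
import java.util.Deque;

final class SimpleCloseableRegistry implements AutoCloseable {
    private final Deque<AutoCloseable> resources = new ArrayDeque<>();

    // Registered resources are closed in reverse registration order.
    void registerCloseable(AutoCloseable resource) {
        resources.push(resource);
    }

    @Override
    public void close() throws Exception {
        while (!resources.isEmpty()) {
            resources.pop().close();
        }
    }

    public static void main(String[] args) throws Exception {
        // try-with-resources calls close() on all exit paths, so no finally block is needed.
        try (SimpleCloseableRegistry registry = new SimpleCloseableRegistry()) {
            registry.registerCloseable(() -> System.out.println("closing resource A"));
            registry.registerCloseable(() -> System.out.println("closing resource B"));
            System.out.println("doing work");
        }
    }
}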
|
public void testCalculateTotalBuffersSize() throws IOException {
int numberOfRemoteChannels = 2;
int numberOfLocalChannels = 0;
int buffersPerChannel = 2;
int extraNetworkBuffersPerGate = 8;
CloseableRegistry closeableRegistry = new CloseableRegistry();
NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder()
.setNetworkBuffersPerChannel(buffersPerChannel)
.setFloatingNetworkBuffersPerGate(extraNetworkBuffersPerGate)
.build();
Tuple3<SingleInputGate, List<RemoteInputChannel>, List<LocalInputChannel>> tuple1 = buildInputGate(
network,
numberOfRemoteChannels,
numberOfLocalChannels);
SingleInputGate inputGate1 = tuple1.f0;
closeableRegistry.registerCloseable(network::close);
closeableRegistry.registerCloseable(inputGate1::close);
try {
SingleInputGate[] inputGates = new SingleInputGate[]{inputGate1};
FloatingBuffersUsageGauge floatingBuffersUsageGauge = new FloatingBuffersUsageGauge(inputGates);
ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge = new ExclusiveBuffersUsageGauge(inputGates);
CreditBasedInputBuffersUsageGauge inputBufferPoolUsageGauge = new CreditBasedInputBuffersUsageGauge(
floatingBuffersUsageGauge,
exclusiveBuffersUsageGauge,
inputGates);
assertEquals(extraNetworkBuffersPerGate, floatingBuffersUsageGauge.calculateTotalBuffers(inputGate1));
assertEquals(numberOfRemoteChannels * buffersPerChannel, exclusiveBuffersUsageGauge.calculateTotalBuffers(inputGate1));
assertEquals(numberOfRemoteChannels * buffersPerChannel + extraNetworkBuffersPerGate, inputBufferPoolUsageGauge.calculateTotalBuffers(inputGate1));
} finally {
closeableRegistry.close();
}
}
|
try {
|
public void testCalculateTotalBuffersSize() throws IOException {
int numberOfRemoteChannels = 2;
int numberOfLocalChannels = 0;
int numberOfBufferPerChannel = 2;
int numberOfBuffersPerGate = 8;
NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder()
.setNetworkBuffersPerChannel(numberOfBufferPerChannel)
.setFloatingNetworkBuffersPerGate(numberOfBuffersPerGate)
.build();
SingleInputGate inputGate1 = buildInputGate(
network,
numberOfRemoteChannels,
numberOfLocalChannels).f0;
SingleInputGate[] inputGates = new SingleInputGate[]{inputGate1};
FloatingBuffersUsageGauge floatingBuffersUsageGauge = new FloatingBuffersUsageGauge(inputGates);
ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge = new ExclusiveBuffersUsageGauge(inputGates);
CreditBasedInputBuffersUsageGauge inputBufferPoolUsageGauge = new CreditBasedInputBuffersUsageGauge(
floatingBuffersUsageGauge,
exclusiveBuffersUsageGauge,
inputGates);
try (CloseableRegistry closeableRegistry = new CloseableRegistry()) {
closeableRegistry.registerCloseable(network::close);
closeableRegistry.registerCloseable(inputGate1::close);
assertEquals(numberOfBuffersPerGate, floatingBuffersUsageGauge.calculateTotalBuffers(inputGate1));
assertEquals(numberOfRemoteChannels * numberOfBufferPerChannel, exclusiveBuffersUsageGauge.calculateTotalBuffers(inputGate1));
assertEquals(numberOfRemoteChannels * numberOfBufferPerChannel + numberOfBuffersPerGate, inputBufferPoolUsageGauge.calculateTotalBuffers(inputGate1));
}
}
|
class InputBuffersMetricsTest extends TestLogger {
@Test
public void testExclusiveBuffersUsage() throws IOException {
int numberOfRemoteChannelsGate1 = 2;
int numberOfLocalChannelsGate1 = 0;
int numberOfRemoteChannelsGate2 = 1;
int numberOfLocalChannelsGate2 = 1;
int numberOfRemoteChannelsTotal = numberOfRemoteChannelsGate1 + numberOfRemoteChannelsGate2;
int numberOfInputGates = 2;
int buffersPerChannel = 2;
int extraNetworkBuffersPerGate = 8;
CloseableRegistry closeableRegistry = new CloseableRegistry();
NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder()
.setNetworkBuffersPerChannel(buffersPerChannel)
.setFloatingNetworkBuffersPerGate(extraNetworkBuffersPerGate)
.build();
Tuple3<SingleInputGate, List<RemoteInputChannel>, List<LocalInputChannel>> tuple1 = buildInputGate(
network,
numberOfRemoteChannelsGate1,
numberOfLocalChannelsGate1);
Tuple3<SingleInputGate, List<RemoteInputChannel>, List<LocalInputChannel>> tuple2 = buildInputGate(
network,
numberOfRemoteChannelsGate2,
numberOfLocalChannelsGate2);
SingleInputGate inputGate1 = tuple1.f0;
SingleInputGate inputGate2 = tuple2.f0;
try {
assertEquals(tuple1.f1.size(), numberOfRemoteChannelsGate1 + numberOfLocalChannelsGate1);
RemoteInputChannel remoteInputChannel1 = tuple1.f1.get(0);
RemoteInputChannel remoteInputChannel2 = tuple1.f1.get(1);
SingleInputGate[] inputGates = new SingleInputGate[]{tuple1.f0, tuple2.f0};
FloatingBuffersUsageGauge floatingBuffersUsageGauge = new FloatingBuffersUsageGauge(inputGates);
ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge = new ExclusiveBuffersUsageGauge(inputGates);
CreditBasedInputBuffersUsageGauge inputBuffersUsageGauge = new CreditBasedInputBuffersUsageGauge(
floatingBuffersUsageGauge,
exclusiveBuffersUsageGauge,
inputGates);
assertEquals(0.0, exclusiveBuffersUsageGauge.getValue(), 0.0);
assertEquals(0.0, inputBuffersUsageGauge.getValue(), 0.0);
drainBuffer(buffersPerChannel, remoteInputChannel1, closeableRegistry);
int totalBuffers = extraNetworkBuffersPerGate * numberOfInputGates + buffersPerChannel * numberOfRemoteChannelsTotal;
assertEquals(buffersPerChannel, exclusiveBuffersUsageGauge.calculateUsedBuffers(inputGate1));
assertEquals((double) buffersPerChannel / (buffersPerChannel * numberOfRemoteChannelsTotal), exclusiveBuffersUsageGauge.getValue(), 0.0001);
assertEquals((double) buffersPerChannel / totalBuffers, inputBuffersUsageGauge.getValue(), 0.0001);
drainBuffer(buffersPerChannel, remoteInputChannel2, closeableRegistry);
assertEquals(buffersPerChannel * numberOfRemoteChannelsGate1, exclusiveBuffersUsageGauge.calculateUsedBuffers(inputGate1));
assertEquals((double) buffersPerChannel * numberOfRemoteChannelsGate1 / (buffersPerChannel * numberOfRemoteChannelsTotal),
exclusiveBuffersUsageGauge.getValue(), 0.0001);
assertEquals((double) buffersPerChannel * numberOfRemoteChannelsGate1 / totalBuffers,
inputBuffersUsageGauge.getValue(), 0.0001);
} finally {
inputGate1.close();
inputGate2.close();
closeableRegistry.close();
network.close();
}
}
@Test
public void testFloatingBuffersUsage() throws IOException, InterruptedException {
int numberOfRemoteChannelsGate1 = 2;
int numberOfLocalChannelsGate1 = 0;
int numberOfRemoteChannelsGate2 = 1;
int numberOfLocalChannelsGate2 = 1;
int numberOfRemoteChannelsTotal = numberOfRemoteChannelsGate1 + numberOfRemoteChannelsGate2;
int numberOfInputGates = 2;
int buffersPerChannel = 2;
int extraNetworkBuffersPerGate = 8;
CloseableRegistry closeableRegistry = new CloseableRegistry();
NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder()
.setNetworkBuffersPerChannel(buffersPerChannel)
.setFloatingNetworkBuffersPerGate(extraNetworkBuffersPerGate)
.build();
Tuple3<SingleInputGate, List<RemoteInputChannel>, List<LocalInputChannel>> tuple1 = buildInputGate(
network,
numberOfRemoteChannelsGate1,
numberOfLocalChannelsGate1);
Tuple3<SingleInputGate, List<RemoteInputChannel>, List<LocalInputChannel>> tuple2 = buildInputGate(
network,
numberOfRemoteChannelsGate2,
numberOfLocalChannelsGate2);
SingleInputGate inputGate1 = tuple1.f0;
SingleInputGate inputGate2 = tuple2.f0;
try {
assertEquals(tuple1.f1.size(), numberOfRemoteChannelsGate1 + numberOfLocalChannelsGate1);
RemoteInputChannel remoteInputChannel1 = tuple1.f1.get(0);
RemoteInputChannel remoteInputChannel2 = tuple1.f1.get(1);
SingleInputGate[] inputGates = new SingleInputGate[]{tuple1.f0, tuple2.f0};
FloatingBuffersUsageGauge floatingBuffersUsageGauge = new FloatingBuffersUsageGauge(inputGates);
ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge = new ExclusiveBuffersUsageGauge(inputGates);
CreditBasedInputBuffersUsageGauge inputBuffersUsageGauge = new CreditBasedInputBuffersUsageGauge(
floatingBuffersUsageGauge,
exclusiveBuffersUsageGauge,
inputGates);
assertEquals(0.0, floatingBuffersUsageGauge.getValue(), 0.0);
assertEquals(0.0, inputBuffersUsageGauge.getValue(), 0.0);
drainBuffer(buffersPerChannel, remoteInputChannel1, closeableRegistry);
drainBuffer(buffersPerChannel, remoteInputChannel2, closeableRegistry);
int totalBuffers = extraNetworkBuffersPerGate * numberOfInputGates + buffersPerChannel * numberOfRemoteChannelsTotal;
remoteInputChannel1.requestSubpartition(0);
int backlog = 3;
int requestFloatingTotal = buffersPerChannel + backlog;
remoteInputChannel1.onSenderBacklog(backlog);
assertEquals(requestFloatingTotal, remoteInputChannel1.unsynchronizedGetFloatingBuffersAvailable());
drainBuffer(requestFloatingTotal, remoteInputChannel1, closeableRegistry);
assertEquals(0, remoteInputChannel1.unsynchronizedGetFloatingBuffersAvailable());
assertEquals((double) (buffersPerChannel * numberOfRemoteChannelsGate1 + requestFloatingTotal) / totalBuffers,
inputBuffersUsageGauge.getValue(), 0.0001);
} finally {
inputGate1.close();
inputGate2.close();
closeableRegistry.close();
network.close();
}
}
private void drainBuffer(int boundary, RemoteInputChannel channel, CloseableRegistry closeableRegistry) throws IOException {
for (int i = 0; i < boundary; i++) {
Buffer buffer = channel.requestBuffer();
if (buffer != null) {
closeableRegistry.registerCloseable(buffer::recycleBuffer);
} else {
break;
}
}
}
private RemoteInputChannel buildRemoteChannel(
int channelIndex,
ResultPartitionID partitionID,
SingleInputGate inputGate,
NettyShuffleEnvironment network) {
return new InputChannelBuilder()
.setChannelIndex(channelIndex)
.setPartitionId(partitionID)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildRemoteAndSetToGate(inputGate);
}
private LocalInputChannel buildLocalChannel(
int channelIndex,
ResultPartitionID partitionID,
SingleInputGate inputGate,
NettyShuffleEnvironment network) {
return new InputChannelBuilder()
.setChannelIndex(channelIndex)
.setPartitionId(partitionID)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildLocalAndSetToGate(inputGate);
}
private Tuple3<SingleInputGate, List<RemoteInputChannel>, List<LocalInputChannel>> buildInputGate(
NettyShuffleEnvironment network,
int remoteChannelNum,
int localChannelNum) throws IOException {
SingleInputGate inputGate = new SingleInputGateBuilder()
.setNumberOfChannels(remoteChannelNum + localChannelNum)
.setResultPartitionType(ResultPartitionType.PIPELINED_BOUNDED)
.setupBufferPoolFactory(network)
.build();
Tuple3<SingleInputGate, List<RemoteInputChannel>, List<LocalInputChannel>> res = Tuple3.of(inputGate, new ArrayList<>(), new ArrayList<>());
int channelIdx = 0;
for (int i = 0; i < remoteChannelNum; i++) {
res.f1.add(buildRemoteChannel(channelIdx, new ResultPartitionID(), inputGate, network));
channelIdx++;
}
for (int i = 0; i < localChannelNum; i++) {
res.f2.add(buildLocalChannel(channelIdx, new ResultPartitionID(), inputGate, network));
channelIdx++;
}
inputGate.setup();
return res;
}
}
|
class InputBuffersMetricsTest extends TestLogger {
@Test
public void testExclusiveBuffersUsage() throws IOException {
int numberOfRemoteChannelsGate1 = 2;
int numberOfLocalChannelsGate1 = 0;
int numberOfRemoteChannelsGate2 = 1;
int numberOfLocalChannelsGate2 = 1;
int totalNumberOfRemoteChannels = numberOfRemoteChannelsGate1 + numberOfRemoteChannelsGate2;
int buffersPerChannel = 2;
int extraNetworkBuffersPerGate = 8;
NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder()
.setNetworkBuffersPerChannel(buffersPerChannel)
.setFloatingNetworkBuffersPerGate(extraNetworkBuffersPerGate)
.build();
Tuple2<SingleInputGate, List<RemoteInputChannel>> tuple1 = buildInputGate(
network,
numberOfRemoteChannelsGate1,
numberOfLocalChannelsGate1);
Tuple2<SingleInputGate, List<RemoteInputChannel>> tuple2 = buildInputGate(
network,
numberOfRemoteChannelsGate2,
numberOfLocalChannelsGate2);
SingleInputGate inputGate1 = tuple1.f0;
SingleInputGate inputGate2 = tuple2.f0;
List<RemoteInputChannel> remoteInputChannels = tuple1.f1;
SingleInputGate[] inputGates = new SingleInputGate[]{tuple1.f0, tuple2.f0};
FloatingBuffersUsageGauge floatingBuffersUsageGauge = new FloatingBuffersUsageGauge(inputGates);
ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge = new ExclusiveBuffersUsageGauge(inputGates);
CreditBasedInputBuffersUsageGauge inputBuffersUsageGauge = new CreditBasedInputBuffersUsageGauge(
floatingBuffersUsageGauge,
exclusiveBuffersUsageGauge,
inputGates);
try (CloseableRegistry closeableRegistry = new CloseableRegistry()) {
assertEquals(0.0, exclusiveBuffersUsageGauge.getValue(), 0.0);
assertEquals(0.0, inputBuffersUsageGauge.getValue(), 0.0);
int totalBuffers = extraNetworkBuffersPerGate * inputGates.length + buffersPerChannel * totalNumberOfRemoteChannels;
int channelIndex = 1;
for (RemoteInputChannel channel : remoteInputChannels) {
drainAndValidate(
buffersPerChannel,
buffersPerChannel * channelIndex++,
channel,
closeableRegistry,
totalBuffers,
buffersPerChannel * totalNumberOfRemoteChannels,
exclusiveBuffersUsageGauge,
inputBuffersUsageGauge,
inputGate1);
}
} finally {
inputGate1.close();
inputGate2.close();
network.close();
}
}
@Test
public void testFloatingBuffersUsage() throws IOException, InterruptedException {
int numberOfRemoteChannelsGate1 = 2;
int numberOfLocalChannelsGate1 = 0;
int numberOfRemoteChannelsGate2 = 1;
int numberOfLocalChannelsGate2 = 1;
int totalNumberOfRemoteChannels = numberOfRemoteChannelsGate1 + numberOfRemoteChannelsGate2;
int buffersPerChannel = 2;
int extraNetworkBuffersPerGate = 8;
NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder()
.setNetworkBuffersPerChannel(buffersPerChannel)
.setFloatingNetworkBuffersPerGate(extraNetworkBuffersPerGate)
.build();
Tuple2<SingleInputGate, List<RemoteInputChannel>> tuple1 = buildInputGate(
network,
numberOfRemoteChannelsGate1,
numberOfLocalChannelsGate1);
SingleInputGate inputGate2 = buildInputGate(
network,
numberOfRemoteChannelsGate2,
numberOfLocalChannelsGate2).f0;
SingleInputGate inputGate1 = tuple1.f0;
RemoteInputChannel remoteInputChannel1 = tuple1.f1.get(0);
SingleInputGate[] inputGates = new SingleInputGate[]{tuple1.f0, inputGate2};
FloatingBuffersUsageGauge floatingBuffersUsageGauge = new FloatingBuffersUsageGauge(inputGates);
ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge = new ExclusiveBuffersUsageGauge(inputGates);
CreditBasedInputBuffersUsageGauge inputBuffersUsageGauge = new CreditBasedInputBuffersUsageGauge(
floatingBuffersUsageGauge,
exclusiveBuffersUsageGauge,
inputGates);
try (CloseableRegistry closeableRegistry = new CloseableRegistry()) {
assertEquals(0.0, floatingBuffersUsageGauge.getValue(), 0.0);
assertEquals(0.0, inputBuffersUsageGauge.getValue(), 0.0);
drainBuffer(buffersPerChannel, remoteInputChannel1, closeableRegistry);
int totalBuffers = extraNetworkBuffersPerGate * inputGates.length + buffersPerChannel * totalNumberOfRemoteChannels;
remoteInputChannel1.requestSubpartition(0);
int backlog = 3;
int totalRequestedBuffers = buffersPerChannel + backlog;
remoteInputChannel1.onSenderBacklog(backlog);
assertEquals(totalRequestedBuffers, remoteInputChannel1.unsynchronizedGetFloatingBuffersAvailable());
drainBuffer(totalRequestedBuffers, remoteInputChannel1, closeableRegistry);
assertEquals(0, remoteInputChannel1.unsynchronizedGetFloatingBuffersAvailable());
assertEquals((double) (buffersPerChannel + totalRequestedBuffers) / totalBuffers,
inputBuffersUsageGauge.getValue(), 0.0001);
} finally {
inputGate1.close();
inputGate2.close();
network.close();
}
}
private void drainAndValidate(
int numBuffersToRequest,
int totalRequestedBuffers,
RemoteInputChannel channel,
CloseableRegistry closeableRegistry,
int totalBuffers,
int totalExclusiveBuffers,
ExclusiveBuffersUsageGauge exclusiveBuffersUsageGauge,
CreditBasedInputBuffersUsageGauge inputBuffersUsageGauge,
SingleInputGate inputGate) throws IOException {
drainBuffer(numBuffersToRequest, channel, closeableRegistry);
assertEquals(totalRequestedBuffers, exclusiveBuffersUsageGauge.calculateUsedBuffers(inputGate));
assertEquals((double) totalRequestedBuffers / totalExclusiveBuffers, exclusiveBuffersUsageGauge.getValue(), 0.0001);
assertEquals((double) totalRequestedBuffers / totalBuffers, inputBuffersUsageGauge.getValue(), 0.0001);
}
private void drainBuffer(int boundary, RemoteInputChannel channel, CloseableRegistry closeableRegistry) throws IOException {
for (int i = 0; i < boundary; i++) {
Buffer buffer = channel.requestBuffer();
if (buffer != null) {
closeableRegistry.registerCloseable(buffer::recycleBuffer);
} else {
break;
}
}
}
private Tuple2<SingleInputGate, List<RemoteInputChannel>> buildInputGate(
NettyShuffleEnvironment network,
int numberOfRemoteChannels,
int numberOfLocalChannels) throws IOException {
SingleInputGate inputGate = new SingleInputGateBuilder()
.setNumberOfChannels(numberOfRemoteChannels + numberOfLocalChannels)
.setResultPartitionType(ResultPartitionType.PIPELINED_BOUNDED)
.setupBufferPoolFactory(network)
.build();
Tuple2<SingleInputGate, List<RemoteInputChannel>> res = Tuple2.of(inputGate, new ArrayList<>());
int channelIdx = 0;
for (int i = 0; i < numberOfRemoteChannels; i++) {
res.f1.add(buildRemoteChannel(channelIdx, inputGate, network));
channelIdx++;
}
for (int i = 0; i < numberOfLocalChannels; i++) {
buildLocalChannel(channelIdx, inputGate, network);
channelIdx++;
}
inputGate.setup();
return res;
}
private RemoteInputChannel buildRemoteChannel(
int channelIndex,
SingleInputGate inputGate,
NettyShuffleEnvironment network) {
return new InputChannelBuilder()
.setChannelIndex(channelIndex)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildRemoteAndSetToGate(inputGate);
}
private void buildLocalChannel(
int channelIndex,
SingleInputGate inputGate,
NettyShuffleEnvironment network) {
new InputChannelBuilder()
.setChannelIndex(channelIndex)
.setupFromNettyShuffleEnvironment(network)
.setConnectionManager(new TestingConnectionManager())
.buildLocalAndSetToGate(inputGate);
}
}
|
Yes, I also think this is the case. I'm glad this helped a little bit. :)
|
public void testReadPublicData() throws Exception {
PCollection<PubsubMessage> messages =
pipeline.apply(
PubsubIO.readMessages()
.fromTopic("projects/pubsub-public-data/topics/taxirides-realtime"));
messages.apply(
"waitForAnyMessage", signal.signalSuccessWhen(messages.getCoder(), anyMessage -> true));
Duration timeout =
pipeline.getOptions().getRunner().getSimpleName().endsWith("DataflowRunner")
? Duration.standardMinutes(5)
: Duration.standardSeconds(30);
signal.waitForSuccess(timeout);
}
|
.fromTopic("projects/pubsub-public-data/topics/taxirides-realtime"));
|
public void testReadPublicData() throws Exception {
pipeline.getOptions().as(DirectOptions.class).setBlockOnRun(false);
PCollection<String> messages =
pipeline.apply(
PubsubIO.readStrings()
.fromTopic("projects/pubsub-public-data/topics/taxirides-realtime"));
messages.apply(
"waitForAnyMessage", signal.signalSuccessWhen(messages.getCoder(), anyMessages -> true));
Supplier<Void> start = signal.waitForStart(Duration.standardMinutes(5));
pipeline.apply(signal.signalStart());
PipelineResult job = pipeline.run();
start.get();
signal.waitForSuccess(Duration.standardSeconds(30));
try {
job.cancel();
} catch (UnsupportedOperationException exc) {
}
}
|
class PubsubReadIT {
@Rule public transient TestPubsubSignal signal = TestPubsubSignal.create();
@Rule public transient TestPipeline pipeline = TestPipeline.create();
}
|
class PubsubReadIT {
@Rule public transient TestPubsubSignal signal = TestPubsubSignal.create();
@Rule public transient TestPipeline pipeline = TestPipeline.create();
}
|
Shall we introduce utility methods `isFlagSet` and `withFlag` to hide the bit manipulation logic?
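A sketch of what those helpers could look like (STNodeFlags itself is not shown in this record, so the constants and signatures below are assumptions):
final class FlagsSketch {
    static final byte HAS_DIAGNOSTIC = 1; // 1 << 0
    static final byte IS_MISSING = 2;     // 1 << 1

    // True when every bit of the given flag is set in flags.
    static boolean isFlagSet(byte flags, byte flag) {
        return (flags & flag) == flag;
    }

    // Returns a copy of flags with the given flag bit(s) set.
    static byte withFlag(byte flags, byte flag) {
        return (byte) (flags | flag);
    }

    public static void main(String[] args) {
        byte flags = 0;
        flags = withFlag(flags, HAS_DIAGNOSTIC);
        System.out.println(isFlagSet(flags, HAS_DIAGNOSTIC)); // true
        System.out.println(isFlagSet(flags, IS_MISSING));     // false
    }
}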
|
private void updateDiagnostics(STNode[] children) {
for (STNode child : children) {
if (!SyntaxUtils.isSTNodePresent(child)) {
continue;
}
if ((child.flags & STNodeFlags.HAS_DIAGNOSTIC) != 0) {
this.flags |= STNodeFlags.HAS_DIAGNOSTIC;
return;
}
}
}
|
if ((child.flags & STNodeFlags.HAS_DIAGNOSTIC) != 0) {
|
private void updateDiagnostics(STNode[] children) {
for (STNode child : children) {
if (!SyntaxUtils.isSTNodePresent(child)) {
continue;
}
if (STNodeFlags.isFlagSet(child.flags, STNodeFlags.HAS_DIAGNOSTIC)) {
this.flags = STNodeFlags.withFlag(this.flags, STNodeFlags.HAS_DIAGNOSTIC);
return;
}
}
}
|
class STNode {
public final SyntaxKind kind;
protected final Collection<STNodeDiagnostic> diagnostics;
protected int width;
protected int widthWithLeadingMinutiae;
protected int widthWithTrailingMinutiae;
protected int widthWithMinutiae;
protected byte flags = 0;
protected static final STNode[] EMPTY_BUCKET = new STNode[0];
protected int bucketCount;
protected STNode[] childBuckets = EMPTY_BUCKET;
STNode(SyntaxKind kind) {
this.kind = kind;
this.diagnostics = Collections.emptyList();
}
STNode(SyntaxKind kind, Collection<STNodeDiagnostic> diagnostics, boolean isMissing) {
this(kind, diagnostics);
if (isMissing) {
flags |= STNodeFlags.IS_MISSING;
}
}
STNode(SyntaxKind kind, Collection<STNodeDiagnostic> diagnostics) {
this.kind = kind;
this.diagnostics = diagnostics;
if (diagnostics.size() > 0) {
flags |= STNodeFlags.HAS_DIAGNOSTIC;
}
}
public STNode childInBucket(int bucket) {
return childBuckets[bucket];
}
public int widthWithMinutiae() {
return widthWithMinutiae;
}
public int width() {
return width;
}
public int widthWithLeadingMinutiae() {
return widthWithLeadingMinutiae;
}
public int widthWithTrailingMinutiae() {
return widthWithTrailingMinutiae;
}
public STNode leadingMinutiae() {
throw new UnsupportedOperationException("" +
"The leadingMinutiae() method is only supported for STToken instances");
}
public STNode trailingMinutiae() {
throw new UnsupportedOperationException("" +
"The trailingMinutiae() method is only supported for STToken instances");
}
public boolean hasDiagnostics() {
return (flags & STNodeFlags.HAS_DIAGNOSTIC) != 0;
}
public Collection<STNodeDiagnostic> diagnostics() {
return Collections.unmodifiableCollection(this.diagnostics);
}
public int bucketCount() {
return bucketCount;
}
public boolean isMissing() {
return this instanceof STMissingToken;
}
public List<STToken> tokens() {
List<STToken> tokens = new ArrayList<>();
tokensInternal(tokens);
return Collections.unmodifiableList(tokens);
}
protected void tokensInternal(List<STToken> tokens) {
for (STNode child : childBuckets) {
if (SyntaxUtils.isSTNodePresent(child)) {
child.tokensInternal(tokens);
}
}
}
public STToken firstToken() {
return (STToken) firstTokenInternal();
}
protected STNode firstTokenInternal() {
for (STNode child : childBuckets) {
if (SyntaxUtils.isToken(child)) {
return child;
}
if (!SyntaxUtils.isSTNodePresent(child) ||
NodeListUtils.isSTNodeList(child) && child.bucketCount == 0) {
continue;
}
STNode firstToken = child.firstTokenInternal();
if (SyntaxUtils.isSTNodePresent(firstToken)) {
return firstToken;
}
}
return null;
}
public STToken lastToken() {
return (STToken) lastTokenInternal();
}
protected STNode lastTokenInternal() {
for (int bucket = childBuckets.length - 1; bucket >= 0; bucket--) {
STNode child = childInBucket(bucket);
if (SyntaxUtils.isToken(child)) {
return child;
}
if (!SyntaxUtils.isSTNodePresent(child) ||
NodeListUtils.isSTNodeList(child) && child.bucketCount == 0) {
continue;
}
STNode lastToken = child.lastTokenInternal();
if (SyntaxUtils.isSTNodePresent(lastToken)) {
return lastToken;
}
}
return null;
}
public abstract STNode modifyWith(Collection<STNodeDiagnostic> diagnostics);
/**
* Replaces the given target node with the replacement.
*
* @param target the node to be replaced
* @param replacement the replacement node
* @param <T> the type of the root node
* @return return the new root node after replacing the target with the replacement
*/
public <T extends STNode> T replace(STNode target, STNode replacement) {
return STTreeModifiers.replace((T) this, target, replacement);
}
@SuppressWarnings("unchecked")
public <T extends Node> T createUnlinkedFacade() {
return (T) createFacade(0, null);
}
public abstract Node createFacade(int position, NonTerminalNode parent);
/**
* Accepts an instance of the {@code STNodeVisitor}, which can be used to
* traverse the syntax tree.
*
* @param visitor an instance of the {@code STNodeVisitor}
*/
public abstract void accept(STNodeVisitor visitor);
/**
* Applies the given {@code STNodeTransformer} to this node and returns
* the transformed object.
*
* @param transformer an instance of the {@code STNodeTransformer}
* @param <T> the type of transformed object
* @return the transformed object
*/
public abstract <T> T apply(STNodeTransformer<T> transformer);
public String toString() {
StringBuilder sb = new StringBuilder();
for (STNode child : this.childBuckets) {
sb.append(child != null ? child.toString() : "");
}
return sb.toString();
}
public void writeTo(StringBuilder builder) {
for (STNode child : this.childBuckets) {
if (SyntaxUtils.isSTNodePresent(child)) {
child.writeTo(builder);
}
}
}
public String toSourceCode() {
StringBuilder builder = new StringBuilder();
writeTo(builder);
return builder.toString();
}
protected void addChildren(STNode... children) {
this.bucketCount = children.length;
this.childBuckets = children;
if (bucketCount == 0) {
return;
}
updateDiagnostics(children);
updateWidth(children);
}
protected boolean checkForReferenceEquality(STNode... children) {
for (int index = 0; index < children.length; index++) {
if (childBuckets[index] != children[index]) {
return false;
}
}
return true;
}
/**
* Update various field properties during the internal node creation time.
* <p>
* Here we update following four width related fields.
* width - width of the node without minutiae
* widthWithLeadingMinutiae - width of the node with only the leading minutiae
* widthWithTrailingMinutiae - width of the node with only the trailing minutiae
* widthWithMinutiae - width of the node with both leading and trailing minutiae
* <p>
* All the above fields are required to calculate the absolute location of the node in a text document
* during the external tree node creation time. Since our plan is to reuse many
* internal tree nodes, I think it should be fine to update the width fields now.
*
* @param children child nodes of this node
*/
private void updateWidth(STNode[] children) {
int firstChildIndex = getFirstChildIndex(children);
if (firstChildIndex == -1) {
return;
}
int lastChildIndex = getLastChildIndex(children);
STNode firstChild = children[firstChildIndex];
STNode lastChild = children[lastChildIndex];
if (firstChildIndex == lastChildIndex) {
this.width = firstChild.width;
this.widthWithLeadingMinutiae = firstChild.widthWithLeadingMinutiae;
this.widthWithTrailingMinutiae = firstChild.widthWithTrailingMinutiae;
this.widthWithMinutiae = firstChild.widthWithMinutiae;
return;
}
this.width = firstChild.widthWithTrailingMinutiae + lastChild.widthWithLeadingMinutiae;
this.widthWithLeadingMinutiae = firstChild.widthWithMinutiae + lastChild.widthWithLeadingMinutiae;
this.widthWithTrailingMinutiae = firstChild.widthWithTrailingMinutiae + lastChild.widthWithMinutiae;
this.widthWithMinutiae = firstChild.widthWithMinutiae + lastChild.widthWithMinutiae;
updateWidth(children, firstChildIndex, lastChildIndex);
}
private void updateWidth(STNode[] children, int firstChildIndex, int lastChildIndex) {
for (int index = firstChildIndex + 1; index < lastChildIndex; index++) {
STNode child = children[index];
if (!SyntaxUtils.isSTNodePresent(children[index])) {
continue;
}
this.width += child.widthWithMinutiae;
this.widthWithLeadingMinutiae += child.widthWithMinutiae;
this.widthWithTrailingMinutiae += child.widthWithMinutiae;
this.widthWithMinutiae += child.widthWithMinutiae;
}
}
private int getFirstChildIndex(STNode... children) {
for (int index = 0; index < children.length; index++) {
STNode child = children[index];
if (SyntaxUtils.isSTNodePresent(child) && child.widthWithMinutiae != 0) {
return index;
}
}
return -1;
}
private int getLastChildIndex(STNode... children) {
for (int index = children.length - 1; index >= 0; index--) {
STNode child = children[index];
if (SyntaxUtils.isSTNodePresent(child) && child.widthWithMinutiae != 0) {
return index;
}
}
return -1;
}
}
|
class STNode {
public final SyntaxKind kind;
protected final Collection<STNodeDiagnostic> diagnostics;
protected int width;
protected int widthWithLeadingMinutiae;
protected int widthWithTrailingMinutiae;
protected int widthWithMinutiae;
protected byte flags = 0;
protected static final STNode[] EMPTY_BUCKET = new STNode[0];
protected int bucketCount;
protected STNode[] childBuckets = EMPTY_BUCKET;
STNode(SyntaxKind kind) {
this.kind = kind;
this.diagnostics = Collections.emptyList();
}
STNode(SyntaxKind kind, Collection<STNodeDiagnostic> diagnostics, boolean isMissing) {
this(kind, diagnostics);
if (isMissing) {
flags = STNodeFlags.withFlag(flags, STNodeFlags.IS_MISSING);
}
}
STNode(SyntaxKind kind, Collection<STNodeDiagnostic> diagnostics) {
this.kind = kind;
this.diagnostics = diagnostics;
if (diagnostics.size() > 0) {
flags = STNodeFlags.withFlag(flags, STNodeFlags.HAS_DIAGNOSTIC);
}
}
public STNode childInBucket(int bucket) {
return childBuckets[bucket];
}
public int widthWithMinutiae() {
return widthWithMinutiae;
}
public int width() {
return width;
}
public int widthWithLeadingMinutiae() {
return widthWithLeadingMinutiae;
}
public int widthWithTrailingMinutiae() {
return widthWithTrailingMinutiae;
}
public STNode leadingMinutiae() {
throw new UnsupportedOperationException("" +
"The leadingMinutiae() method is only supported for STToken instances");
}
public STNode trailingMinutiae() {
throw new UnsupportedOperationException("" +
"The trailingMinutiae() method is only supported for STToken instances");
}
public boolean hasDiagnostics() {
return STNodeFlags.isFlagSet(flags, STNodeFlags.HAS_DIAGNOSTIC);
}
public Collection<STNodeDiagnostic> diagnostics() {
return Collections.unmodifiableCollection(this.diagnostics);
}
public int bucketCount() {
return bucketCount;
}
public boolean isMissing() {
return this instanceof STMissingToken;
}
public List<STToken> tokens() {
List<STToken> tokens = new ArrayList<>();
tokensInternal(tokens);
return Collections.unmodifiableList(tokens);
}
protected void tokensInternal(List<STToken> tokens) {
for (STNode child : childBuckets) {
if (SyntaxUtils.isSTNodePresent(child)) {
child.tokensInternal(tokens);
}
}
}
public STToken firstToken() {
return (STToken) firstTokenInternal();
}
protected STNode firstTokenInternal() {
for (STNode child : childBuckets) {
if (SyntaxUtils.isToken(child)) {
return child;
}
if (!SyntaxUtils.isSTNodePresent(child) ||
NodeListUtils.isSTNodeList(child) && child.bucketCount == 0) {
continue;
}
STNode firstToken = child.firstTokenInternal();
if (SyntaxUtils.isSTNodePresent(firstToken)) {
return firstToken;
}
}
return null;
}
public STToken lastToken() {
return (STToken) lastTokenInternal();
}
protected STNode lastTokenInternal() {
for (int bucket = childBuckets.length - 1; bucket >= 0; bucket--) {
STNode child = childInBucket(bucket);
if (SyntaxUtils.isToken(child)) {
return child;
}
if (!SyntaxUtils.isSTNodePresent(child) ||
NodeListUtils.isSTNodeList(child) && child.bucketCount == 0) {
continue;
}
STNode lastToken = child.lastTokenInternal();
if (SyntaxUtils.isSTNodePresent(lastToken)) {
return lastToken;
}
}
return null;
}
public abstract STNode modifyWith(Collection<STNodeDiagnostic> diagnostics);
/**
* Replaces the given target node with the replacement.
*
* @param target the node to be replaced
* @param replacement the replacement node
* @param <T> the type of the root node
* @return return the new root node after replacing the target with the replacement
*/
public <T extends STNode> T replace(STNode target, STNode replacement) {
return STTreeModifiers.replace((T) this, target, replacement);
}
@SuppressWarnings("unchecked")
public <T extends Node> T createUnlinkedFacade() {
return (T) createFacade(0, null);
}
public abstract Node createFacade(int position, NonTerminalNode parent);
/**
* Accepts an instance of the {@code STNodeVisitor}, which can be used to
* traverse the syntax tree.
*
* @param visitor an instance of the {@code STNodeVisitor}
*/
public abstract void accept(STNodeVisitor visitor);
/**
* Applies the given {@code STNodeTransformer} to this node and returns
* the transformed object.
*
* @param transformer an instance of the {@code STNodeTransformer}
* @param <T> the type of transformed object
* @return the transformed object
*/
public abstract <T> T apply(STNodeTransformer<T> transformer);
public String toString() {
StringBuilder sb = new StringBuilder();
for (STNode child : this.childBuckets) {
sb.append(child != null ? child.toString() : "");
}
return sb.toString();
}
public void writeTo(StringBuilder builder) {
for (STNode child : this.childBuckets) {
if (SyntaxUtils.isSTNodePresent(child)) {
child.writeTo(builder);
}
}
}
public String toSourceCode() {
StringBuilder builder = new StringBuilder();
writeTo(builder);
return builder.toString();
}
protected void addChildren(STNode... children) {
this.bucketCount = children.length;
this.childBuckets = children;
if (bucketCount == 0) {
return;
}
updateDiagnostics(children);
updateWidth(children);
}
protected boolean checkForReferenceEquality(STNode... children) {
for (int index = 0; index < children.length; index++) {
if (childBuckets[index] != children[index]) {
return false;
}
}
return true;
}
/**
* Update various field properties during the internal node creation time.
* <p>
* Here we update the following four width-related fields:
* width - width of the node without minutiae
* widthWithLeadingMinutiae - width of the node with only the leading minutiae
* widthWithTrailingMinutiae - width of the node with only the trailing minutiae
* widthWithMinutiae - width of the node with both leading and trailing minutiae
* <p>
* All the above fields are required to calculate the absolute location of the node in a text document
* during the external tree node creation time. Since our plan is to reuse many
* internal tree nodes, I think it should be fine to update the width fields now.
*
* @param children child nodes of this node
*/
private void updateWidth(STNode[] children) {
int firstChildIndex = getFirstChildIndex(children);
if (firstChildIndex == -1) {
return;
}
int lastChildIndex = getLastChildIndex(children);
STNode firstChild = children[firstChildIndex];
STNode lastChild = children[lastChildIndex];
if (firstChildIndex == lastChildIndex) {
this.width = firstChild.width;
this.widthWithLeadingMinutiae = firstChild.widthWithLeadingMinutiae;
this.widthWithTrailingMinutiae = firstChild.widthWithTrailingMinutiae;
this.widthWithMinutiae = firstChild.widthWithMinutiae;
return;
}
this.width = firstChild.widthWithTrailingMinutiae + lastChild.widthWithLeadingMinutiae;
this.widthWithLeadingMinutiae = firstChild.widthWithMinutiae + lastChild.widthWithLeadingMinutiae;
this.widthWithTrailingMinutiae = firstChild.widthWithTrailingMinutiae + lastChild.widthWithMinutiae;
this.widthWithMinutiae = firstChild.widthWithMinutiae + lastChild.widthWithMinutiae;
updateWidth(children, firstChildIndex, lastChildIndex);
}
private void updateWidth(STNode[] children, int firstChildIndex, int lastChildIndex) {
for (int index = firstChildIndex + 1; index < lastChildIndex; index++) {
STNode child = children[index];
if (!SyntaxUtils.isSTNodePresent(child)) {
continue;
}
this.width += child.widthWithMinutiae;
this.widthWithLeadingMinutiae += child.widthWithMinutiae;
this.widthWithTrailingMinutiae += child.widthWithMinutiae;
this.widthWithMinutiae += child.widthWithMinutiae;
}
}
private int getFirstChildIndex(STNode... children) {
for (int index = 0; index < children.length; index++) {
STNode child = children[index];
if (SyntaxUtils.isSTNodePresent(child) && child.widthWithMinutiae != 0) {
return index;
}
}
return -1;
}
private int getLastChildIndex(STNode... children) {
for (int index = children.length - 1; index >= 0; index--) {
STNode child = children[index];
if (SyntaxUtils.isSTNodePresent(child) && child.widthWithMinutiae != 0) {
return index;
}
}
return -1;
}
}
|
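The updateWidth documentation above describes how a composite node's four widths are assembled: the node's own width takes the first child's trailing-minutiae width plus the last child's leading-minutiae width, interior children always contribute their full widthWithMinutiae, and widthWithMinutiae sums the full widths of both ends. A minimal sketch of that arithmetic; the TokenWidths record and the literal numbers are invented for illustration, only the composition rule comes from the code:
```java
// Source text " int x": "int" has one leading and one trailing space, "x" has none.
final class WidthDemo {
    record TokenWidths(int width, int withLeading, int withTrailing, int withMinutiae) {}

    public static void main(String[] args) {
        TokenWidths first = new TokenWidths(3, 4, 4, 5); // "int"
        TokenWidths last = new TokenWidths(1, 1, 1, 1);  // "x"
        int width = first.withTrailing() + last.withLeading();              // 5, covers "int x"
        int widthWithMinutiae = first.withMinutiae() + last.withMinutiae(); // 6, covers " int x"
        System.out.println(width + ", " + widthWithMinutiae);
    }
}
```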
I will do it; I noticed a few other small things that I will address.
|
public void testRootResource() {
RestAssured.get("/other/hello.txt").then()
.log().all()
.statusCode(200)
.body(is("hello"));
RestAssured.get("/stuff.html").then()
.log().all()
.statusCode(200)
.body(is("stuff"));
RestAssured.get("/index.html").then()
.log().all()
.statusCode(200)
.body(is("index"));
RestAssured.get("/").then()
.log().all()
.statusCode(200)
.body(is("index"));
}
|
.log().all()
|
public void testRootResource() {
RestAssured.get("/other/hello.txt").then()
.log().all()
.statusCode(200)
.body(is("hello"));
RestAssured.get("/stuff.html").then()
.log().all()
.statusCode(200)
.body(is("stuff"));
RestAssured.get("/index.html").then()
.log().all()
.statusCode(200)
.body(is("index"));
RestAssured.get("/").then()
.log().all()
.statusCode(200)
.body(is("index"));
}
|
class ClasspathResourceTestCase {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(RootResource.class)
.addAsManifestResource(new StringAsset("hello"), "resources/other/hello.txt")
.addAsManifestResource(new StringAsset("index"), "resources/index.html")
.addAsManifestResource(new StringAsset("stuff"), "resources/stuff.html"));
@Test
}
|
class ClasspathResourceTestCase {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(RootResource.class)
.addAsManifestResource(new StringAsset("hello"), "resources/other/hello.txt")
.addAsManifestResource(new StringAsset("index"), "resources/index.html")
.addAsManifestResource(new StringAsset("stuff"), "resources/stuff.html"));
@Test
}
|
`close` instead of `stop` in the `after` method
|
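The distinction the reviewer is drawing: Curator's TestingServer.stop() halts the server but keeps its temporary state so it could be restarted, while close() stops it and also releases the temporary directory. A minimal sketch of the requested teardown, mirroring the field name used in the test below (everything else is illustrative):
```java
@After
public void after() throws IOException {
    if (testingServer != null) {
        // close(), not stop(): teardown never restarts the server, so also
        // release the ZooKeeper temp directory rather than keeping it around
        testingServer.close();
        testingServer = null;
    }
}
```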
public void before() {
try {
testingServer = ZooKeeperTestUtils.createZookeeperTestingServer();
} catch (Exception e) {
throw new RuntimeException("Could not start ZooKeeper testing cluster.", e);
}
configuration = new Configuration();
configuration.setString(
HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(
configuration, testingFatalErrorHandlerResource.getFatalErrorHandler());
}
|
testingServer = ZooKeeperTestUtils.createZookeeperTestingServer();
|
public void before() {
try {
testingServer = ZooKeeperTestUtils.createAndStartZookeeperTestingServer();
} catch (Exception e) {
throw new RuntimeException("Could not start ZooKeeper testing cluster.", e);
}
configuration = new Configuration();
configuration.setString(
HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(
configuration, testingFatalErrorHandlerResource.getFatalErrorHandler());
}
|
class ZooKeeperLeaderElectionTest extends TestLogger {
private TestingServer testingServer;
private Configuration configuration;
private CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper;
private static final String LEADER_ADDRESS = "akka
private static final long timeout = 200L * 1000L;
private static final Logger LOG = LoggerFactory.getLogger(ZooKeeperLeaderElectionTest.class);
@Rule
public final TestingFatalErrorHandlerResource testingFatalErrorHandlerResource =
new TestingFatalErrorHandlerResource();
@Before
@After
public void after() throws IOException {
if (curatorFrameworkWrapper != null) {
curatorFrameworkWrapper.close();
curatorFrameworkWrapper = null;
}
if (testingServer != null) {
testingServer.stop();
testingServer = null;
}
}
/** Tests that the ZooKeeperLeaderElection/RetrievalService both return the correct URL. */
@Test
public void testZooKeeperLeaderElectionRetrieval() throws Exception {
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingLeaderRetrievalEventHandler retrievalEventHandler =
new TestingLeaderRetrievalEventHandler();
LeaderElectionDriver leaderElectionDriver = null;
LeaderRetrievalDriver leaderRetrievalDriver = null;
try {
leaderElectionDriver =
createAndInitLeaderElectionDriver(
curatorFrameworkWrapper.asCuratorFramework(), electionEventHandler);
leaderRetrievalDriver =
ZooKeeperUtils.createLeaderRetrievalDriverFactory(
curatorFrameworkWrapper.asCuratorFramework())
.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
electionEventHandler.waitForLeader();
final LeaderInformation confirmedLeaderInformation =
electionEventHandler.getConfirmedLeaderInformation();
assertThat(confirmedLeaderInformation.getLeaderAddress(), is(LEADER_ADDRESS));
retrievalEventHandler.waitForNewLeader();
assertThat(
retrievalEventHandler.getLeaderSessionID(),
is(confirmedLeaderInformation.getLeaderSessionID()));
assertThat(
retrievalEventHandler.getAddress(),
is(confirmedLeaderInformation.getLeaderAddress()));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
if (leaderRetrievalDriver != null) {
leaderRetrievalDriver.close();
}
}
}
/**
* Repeatedly tests the reelection of the still-available LeaderContenders. After a contender
* has been elected as the leader, it is removed. This forces the DefaultLeaderElectionService
* to elect a new leader.
*/
@Test
public void testZooKeeperReelection() throws Exception {
Deadline deadline = Deadline.fromNow(Duration.ofMinutes(5L));
int num = 10;
DefaultLeaderElectionService[] leaderElectionService =
new DefaultLeaderElectionService[num];
TestingContender[] contenders = new TestingContender[num];
DefaultLeaderRetrievalService leaderRetrievalService = null;
TestingListener listener = new TestingListener();
try {
leaderRetrievalService =
ZooKeeperUtils.createLeaderRetrievalService(
curatorFrameworkWrapper.asCuratorFramework());
LOG.debug("Start leader retrieval service for the TestingListener.");
leaderRetrievalService.start(listener);
for (int i = 0; i < num; i++) {
leaderElectionService[i] =
ZooKeeperUtils.createLeaderElectionService(
curatorFrameworkWrapper.asCuratorFramework());
contenders[i] = new TestingContender(createAddress(i), leaderElectionService[i]);
LOG.debug("Start leader election service for contender
leaderElectionService[i].start(contenders[i]);
}
String pattern = LEADER_ADDRESS + "_" + "(\\d+)";
Pattern regex = Pattern.compile(pattern);
int numberSeenLeaders = 0;
while (deadline.hasTimeLeft() && numberSeenLeaders < num) {
LOG.debug("Wait for new leader
String address = listener.waitForNewLeader();
Matcher m = regex.matcher(address);
if (m.find()) {
int index = Integer.parseInt(m.group(1));
TestingContender contender = contenders[index];
if (address.equals(createAddress(index))
&& listener.getLeaderSessionID()
.equals(contender.getLeaderSessionID())) {
LOG.debug(
"Stop leader election service of contender
numberSeenLeaders);
leaderElectionService[index].stop();
leaderElectionService[index] = null;
numberSeenLeaders++;
}
} else {
fail("Did not find the leader's index.");
}
}
assertFalse("Did not complete the leader reelection in time.", deadline.isOverdue());
assertEquals(num, numberSeenLeaders);
} finally {
if (leaderRetrievalService != null) {
leaderRetrievalService.stop();
}
for (DefaultLeaderElectionService electionService : leaderElectionService) {
if (electionService != null) {
electionService.stop();
}
}
}
}
@Nonnull
private String createAddress(int i) {
return LEADER_ADDRESS + "_" + i;
}
/**
* Tests the repeated reelection of {@link LeaderContender} once the current leader dies.
* Furthermore, it tests that new LeaderElectionServices can be started later on and that they
* successfully register at ZooKeeper and take part in the leader election.
*/
@Test
public void testZooKeeperReelectionWithReplacement() throws Exception {
int num = 3;
int numTries = 30;
DefaultLeaderElectionService[] leaderElectionService =
new DefaultLeaderElectionService[num];
TestingContender[] contenders = new TestingContender[num];
DefaultLeaderRetrievalService leaderRetrievalService = null;
TestingListener listener = new TestingListener();
try {
leaderRetrievalService =
ZooKeeperUtils.createLeaderRetrievalService(
curatorFrameworkWrapper.asCuratorFramework());
leaderRetrievalService.start(listener);
for (int i = 0; i < num; i++) {
leaderElectionService[i] =
ZooKeeperUtils.createLeaderElectionService(
curatorFrameworkWrapper.asCuratorFramework());
contenders[i] =
new TestingContender(
LEADER_ADDRESS + "_" + i + "_0", leaderElectionService[i]);
leaderElectionService[i].start(contenders[i]);
}
String pattern = LEADER_ADDRESS + "_" + "(\\d+)" + "_" + "(\\d+)";
Pattern regex = Pattern.compile(pattern);
for (int i = 0; i < numTries; i++) {
listener.waitForNewLeader();
String address = listener.getAddress();
Matcher m = regex.matcher(address);
if (m.find()) {
int index = Integer.parseInt(m.group(1));
int lastTry = Integer.parseInt(m.group(2));
assertEquals(
listener.getLeaderSessionID(), contenders[index].getLeaderSessionID());
leaderElectionService[index].stop();
leaderElectionService[index] =
ZooKeeperUtils.createLeaderElectionService(
curatorFrameworkWrapper.asCuratorFramework());
contenders[index] =
new TestingContender(
LEADER_ADDRESS + "_" + index + "_" + (lastTry + 1),
leaderElectionService[index]);
leaderElectionService[index].start(contenders[index]);
} else {
throw new Exception("Did not find the leader's index.");
}
}
} finally {
if (leaderRetrievalService != null) {
leaderRetrievalService.stop();
}
for (DefaultLeaderElectionService electionService : leaderElectionService) {
if (electionService != null) {
electionService.stop();
}
}
}
}
/**
* Tests that the current leader is notified when its leader connection information in ZooKeeper
* is overwritten. The leader must re-establish the correct leader connection information in
* ZooKeeper.
*/
@Test
public void testLeaderShouldBeCorrectedWhenOverwritten() throws Exception {
final String faultyContenderUrl = "faultyContender";
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingLeaderRetrievalEventHandler retrievalEventHandler =
new TestingLeaderRetrievalEventHandler();
ZooKeeperLeaderElectionDriver leaderElectionDriver = null;
LeaderRetrievalDriver leaderRetrievalDriver = null;
CuratorFrameworkWithUnhandledErrorListener anotherCuratorFrameworkWrapper = null;
try {
leaderElectionDriver =
createAndInitLeaderElectionDriver(
curatorFrameworkWrapper.asCuratorFramework(), electionEventHandler);
electionEventHandler.waitForLeader();
final LeaderInformation confirmedLeaderInformation =
electionEventHandler.getConfirmedLeaderInformation();
assertThat(confirmedLeaderInformation.getLeaderAddress(), is(LEADER_ADDRESS));
anotherCuratorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(
configuration, NoOpFatalErrorHandler.INSTANCE);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(baos);
oos.writeUTF(faultyContenderUrl);
oos.writeObject(UUID.randomUUID());
oos.close();
boolean dataWritten = false;
final String connectionInformationPath =
leaderElectionDriver.getConnectionInformationPath();
while (!dataWritten) {
anotherCuratorFrameworkWrapper
.asCuratorFramework()
.delete()
.forPath(connectionInformationPath);
try {
anotherCuratorFrameworkWrapper
.asCuratorFramework()
.create()
.forPath(connectionInformationPath, baos.toByteArray());
dataWritten = true;
} catch (KeeperException.NodeExistsException e) {
// lost the race: the leader re-created the node between our delete and create, so retry
}
}
leaderRetrievalDriver =
ZooKeeperUtils.createLeaderRetrievalDriverFactory(
curatorFrameworkWrapper.asCuratorFramework())
.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
if (retrievalEventHandler.waitForNewLeader().equals(faultyContenderUrl)) {
retrievalEventHandler.waitForNewLeader();
}
assertThat(
retrievalEventHandler.getLeaderSessionID(),
is(confirmedLeaderInformation.getLeaderSessionID()));
assertThat(
retrievalEventHandler.getAddress(),
is(confirmedLeaderInformation.getLeaderAddress()));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
if (leaderRetrievalDriver != null) {
leaderRetrievalDriver.close();
}
if (anotherCuratorFrameworkWrapper != null) {
anotherCuratorFrameworkWrapper.close();
}
}
}
/**
* Test that errors in the {@link LeaderElectionDriver} are correctly forwarded to the {@link
* LeaderContender}.
*/
@Test
public void testExceptionForwarding() throws Exception {
LeaderElectionDriver leaderElectionDriver = null;
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
CuratorFramework client = null;
final CreateBuilder mockCreateBuilder =
mock(CreateBuilder.class, Mockito.RETURNS_DEEP_STUBS);
final String exMsg = "Test exception";
final Exception testException = new Exception(exMsg);
final CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(configuration, NoOpFatalErrorHandler.INSTANCE);
try {
client = spy(curatorFrameworkWrapper.asCuratorFramework());
doAnswer(invocation -> mockCreateBuilder).when(client).create();
when(mockCreateBuilder
.creatingParentsIfNeeded()
.withMode(Matchers.any(CreateMode.class))
.forPath(anyString(), any(byte[].class)))
.thenThrow(testException);
leaderElectionDriver = createAndInitLeaderElectionDriver(client, electionEventHandler);
electionEventHandler.waitForError();
assertNotNull(electionEventHandler.getError());
assertThat(
ExceptionUtils.findThrowableWithMessage(electionEventHandler.getError(), exMsg)
.isPresent(),
is(true));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
if (curatorFrameworkWrapper != null) {
curatorFrameworkWrapper.close();
}
}
}
/**
* Tests that there is no information left in the ZooKeeper cluster after the ZooKeeper client
* has terminated. In other words, checks that the ZooKeeperLeaderElection service uses
* ephemeral nodes.
*/
@Test
public void testEphemeralZooKeeperNodes() throws Exception {
ZooKeeperLeaderElectionDriver leaderElectionDriver = null;
LeaderRetrievalDriver leaderRetrievalDriver = null;
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingLeaderRetrievalEventHandler retrievalEventHandler =
new TestingLeaderRetrievalEventHandler();
CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper = null;
CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper2 = null;
NodeCache cache = null;
try {
curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(
configuration, testingFatalErrorHandlerResource.getFatalErrorHandler());
curatorFrameworkWrapper2 =
ZooKeeperUtils.startCuratorFramework(
configuration, testingFatalErrorHandlerResource.getFatalErrorHandler());
leaderElectionDriver =
createAndInitLeaderElectionDriver(
curatorFrameworkWrapper.asCuratorFramework(), electionEventHandler);
leaderRetrievalDriver =
ZooKeeperUtils.createLeaderRetrievalDriverFactory(
curatorFrameworkWrapper2.asCuratorFramework())
.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
cache =
new NodeCache(
curatorFrameworkWrapper2.asCuratorFramework(),
leaderElectionDriver.getConnectionInformationPath());
ExistsCacheListener existsListener = new ExistsCacheListener(cache);
DeletedCacheListener deletedCacheListener = new DeletedCacheListener(cache);
cache.getListenable().addListener(existsListener);
cache.start();
electionEventHandler.waitForLeader();
retrievalEventHandler.waitForNewLeader();
Future<Boolean> existsFuture = existsListener.nodeExists();
existsFuture.get(timeout, TimeUnit.MILLISECONDS);
cache.getListenable().addListener(deletedCacheListener);
leaderElectionDriver.close();
curatorFrameworkWrapper.close();
Future<Boolean> deletedFuture = deletedCacheListener.nodeDeleted();
deletedFuture.get(timeout, TimeUnit.MILLISECONDS);
retrievalEventHandler.waitForEmptyLeaderInformation();
} finally {
electionEventHandler.close();
if (leaderRetrievalDriver != null) {
leaderRetrievalDriver.close();
}
if (cache != null) {
cache.close();
}
if (curatorFrameworkWrapper2 != null) {
curatorFrameworkWrapper2.close();
}
}
}
@Test
public void testNotLeaderShouldNotCleanUpTheLeaderInformation() throws Exception {
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingLeaderRetrievalEventHandler retrievalEventHandler =
new TestingLeaderRetrievalEventHandler();
ZooKeeperLeaderElectionDriver leaderElectionDriver = null;
ZooKeeperLeaderRetrievalDriver leaderRetrievalDriver = null;
try {
leaderElectionDriver =
createAndInitLeaderElectionDriver(
curatorFrameworkWrapper.asCuratorFramework(), electionEventHandler);
electionEventHandler.waitForLeader();
final LeaderInformation confirmedLeaderInformation =
electionEventHandler.getConfirmedLeaderInformation();
assertThat(confirmedLeaderInformation.getLeaderAddress(), is(LEADER_ADDRESS));
leaderElectionDriver.notLeader();
electionEventHandler.waitForRevokeLeader();
assertThat(
electionEventHandler.getConfirmedLeaderInformation(),
is(LeaderInformation.empty()));
leaderRetrievalDriver =
ZooKeeperUtils.createLeaderRetrievalDriverFactory(
curatorFrameworkWrapper.asCuratorFramework())
.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
retrievalEventHandler.waitForNewLeader();
assertThat(
retrievalEventHandler.getLeaderSessionID(),
is(confirmedLeaderInformation.getLeaderSessionID()));
assertThat(
retrievalEventHandler.getAddress(),
is(confirmedLeaderInformation.getLeaderAddress()));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
if (leaderRetrievalDriver != null) {
leaderRetrievalDriver.close();
}
}
}
/**
* Test that background errors in the {@link LeaderElectionDriver} are correctly forwarded to
* the {@link FatalErrorHandler}.
*/
@Test
public void testUnExpectedErrorForwarding() throws Exception {
LeaderElectionDriver leaderElectionDriver = null;
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingFatalErrorHandler fatalErrorHandler = new TestingFatalErrorHandler();
final FlinkRuntimeException testException =
new FlinkRuntimeException("testUnExpectedErrorForwarding");
final CuratorFrameworkFactory.Builder curatorFrameworkBuilder =
CuratorFrameworkFactory.builder()
.connectString(testingServer.getConnectString())
.retryPolicy(new ExponentialBackoffRetry(1, 0))
.aclProvider(
new ACLProvider() {
@Override
public List<ACL> getDefaultAcl() {
throw testException;
}
@Override
public List<ACL> getAclForPath(String s) {
throw testException;
}
})
.namespace("flink");
try (CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(curatorFrameworkBuilder, fatalErrorHandler)) {
CuratorFramework clientWithErrorHandler = curatorFrameworkWrapper.asCuratorFramework();
assertFalse(fatalErrorHandler.getErrorFuture().isDone());
leaderElectionDriver =
createAndInitLeaderElectionDriver(clientWithErrorHandler, electionEventHandler);
assertThat(
fatalErrorHandler.getErrorFuture().join(),
FlinkMatchers.containsCause(testException));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
}
}
private static class ExistsCacheListener implements NodeCacheListener {
final CompletableFuture<Boolean> existsPromise = new CompletableFuture<>();
final NodeCache cache;
public ExistsCacheListener(final NodeCache cache) {
this.cache = cache;
}
public Future<Boolean> nodeExists() {
return existsPromise;
}
@Override
public void nodeChanged() throws Exception {
ChildData data = cache.getCurrentData();
if (data != null && !existsPromise.isDone()) {
existsPromise.complete(true);
cache.getListenable().removeListener(this);
}
}
}
private static class DeletedCacheListener implements NodeCacheListener {
final CompletableFuture<Boolean> deletedPromise = new CompletableFuture<>();
final NodeCache cache;
public DeletedCacheListener(final NodeCache cache) {
this.cache = cache;
}
public Future<Boolean> nodeDeleted() {
return deletedPromise;
}
@Override
public void nodeChanged() throws Exception {
ChildData data = cache.getCurrentData();
if (data == null && !deletedPromise.isDone()) {
deletedPromise.complete(true);
cache.getListenable().removeListener(this);
}
}
}
private ZooKeeperLeaderElectionDriver createAndInitLeaderElectionDriver(
CuratorFramework client, TestingLeaderElectionEventHandler electionEventHandler)
throws Exception {
final ZooKeeperLeaderElectionDriver leaderElectionDriver =
ZooKeeperUtils.createLeaderElectionDriverFactory(client)
.createLeaderElectionDriver(
electionEventHandler,
electionEventHandler::handleError,
LEADER_ADDRESS);
electionEventHandler.init(leaderElectionDriver);
return leaderElectionDriver;
}
}
|
class ZooKeeperLeaderElectionTest extends TestLogger {
private TestingServer testingServer;
private Configuration configuration;
private CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper;
private static final String LEADER_ADDRESS = "akka
private static final long timeout = 200L * 1000L;
private static final Logger LOG = LoggerFactory.getLogger(ZooKeeperLeaderElectionTest.class);
@Rule
public final TestingFatalErrorHandlerResource testingFatalErrorHandlerResource =
new TestingFatalErrorHandlerResource();
@Before
@After
public void after() throws IOException {
if (curatorFrameworkWrapper != null) {
curatorFrameworkWrapper.close();
curatorFrameworkWrapper = null;
}
if (testingServer != null) {
testingServer.close();
testingServer = null;
}
}
/** Tests that the ZooKeeperLeaderElection/RetrievalService both return the correct URL. */
@Test
public void testZooKeeperLeaderElectionRetrieval() throws Exception {
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingLeaderRetrievalEventHandler retrievalEventHandler =
new TestingLeaderRetrievalEventHandler();
LeaderElectionDriver leaderElectionDriver = null;
LeaderRetrievalDriver leaderRetrievalDriver = null;
try {
leaderElectionDriver =
createAndInitLeaderElectionDriver(
curatorFrameworkWrapper.asCuratorFramework(), electionEventHandler);
leaderRetrievalDriver =
ZooKeeperUtils.createLeaderRetrievalDriverFactory(
curatorFrameworkWrapper.asCuratorFramework())
.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
electionEventHandler.waitForLeader();
final LeaderInformation confirmedLeaderInformation =
electionEventHandler.getConfirmedLeaderInformation();
assertThat(confirmedLeaderInformation.getLeaderAddress(), is(LEADER_ADDRESS));
retrievalEventHandler.waitForNewLeader();
assertThat(
retrievalEventHandler.getLeaderSessionID(),
is(confirmedLeaderInformation.getLeaderSessionID()));
assertThat(
retrievalEventHandler.getAddress(),
is(confirmedLeaderInformation.getLeaderAddress()));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
if (leaderRetrievalDriver != null) {
leaderRetrievalDriver.close();
}
}
}
/**
* Repeatedly tests the reelection of the still-available LeaderContenders. After a contender
* has been elected as the leader, it is removed. This forces the DefaultLeaderElectionService
* to elect a new leader.
*/
@Test
public void testZooKeeperReelection() throws Exception {
Deadline deadline = Deadline.fromNow(Duration.ofMinutes(5L));
int num = 10;
DefaultLeaderElectionService[] leaderElectionService =
new DefaultLeaderElectionService[num];
TestingContender[] contenders = new TestingContender[num];
DefaultLeaderRetrievalService leaderRetrievalService = null;
TestingListener listener = new TestingListener();
try {
leaderRetrievalService =
ZooKeeperUtils.createLeaderRetrievalService(
curatorFrameworkWrapper.asCuratorFramework());
LOG.debug("Start leader retrieval service for the TestingListener.");
leaderRetrievalService.start(listener);
for (int i = 0; i < num; i++) {
leaderElectionService[i] =
ZooKeeperUtils.createLeaderElectionService(
curatorFrameworkWrapper.asCuratorFramework());
contenders[i] = new TestingContender(createAddress(i), leaderElectionService[i]);
LOG.debug("Start leader election service for contender
leaderElectionService[i].start(contenders[i]);
}
String pattern = LEADER_ADDRESS + "_" + "(\\d+)";
Pattern regex = Pattern.compile(pattern);
int numberSeenLeaders = 0;
while (deadline.hasTimeLeft() && numberSeenLeaders < num) {
LOG.debug("Wait for new leader
String address = listener.waitForNewLeader();
Matcher m = regex.matcher(address);
if (m.find()) {
int index = Integer.parseInt(m.group(1));
TestingContender contender = contenders[index];
if (address.equals(createAddress(index))
&& listener.getLeaderSessionID()
.equals(contender.getLeaderSessionID())) {
LOG.debug(
"Stop leader election service of contender
numberSeenLeaders);
leaderElectionService[index].stop();
leaderElectionService[index] = null;
numberSeenLeaders++;
}
} else {
fail("Did not find the leader's index.");
}
}
assertFalse("Did not complete the leader reelection in time.", deadline.isOverdue());
assertEquals(num, numberSeenLeaders);
} finally {
if (leaderRetrievalService != null) {
leaderRetrievalService.stop();
}
for (DefaultLeaderElectionService electionService : leaderElectionService) {
if (electionService != null) {
electionService.stop();
}
}
}
}
@Nonnull
private String createAddress(int i) {
return LEADER_ADDRESS + "_" + i;
}
/**
* Tests the repeated reelection of {@link LeaderContender} once the current leader dies.
* Furthermore, it tests that new LeaderElectionServices can be started later on and that they
* successfully register at ZooKeeper and take part in the leader election.
*/
@Test
public void testZooKeeperReelectionWithReplacement() throws Exception {
int num = 3;
int numTries = 30;
DefaultLeaderElectionService[] leaderElectionService =
new DefaultLeaderElectionService[num];
TestingContender[] contenders = new TestingContender[num];
DefaultLeaderRetrievalService leaderRetrievalService = null;
TestingListener listener = new TestingListener();
try {
leaderRetrievalService =
ZooKeeperUtils.createLeaderRetrievalService(
curatorFrameworkWrapper.asCuratorFramework());
leaderRetrievalService.start(listener);
for (int i = 0; i < num; i++) {
leaderElectionService[i] =
ZooKeeperUtils.createLeaderElectionService(
curatorFrameworkWrapper.asCuratorFramework());
contenders[i] =
new TestingContender(
LEADER_ADDRESS + "_" + i + "_0", leaderElectionService[i]);
leaderElectionService[i].start(contenders[i]);
}
String pattern = LEADER_ADDRESS + "_" + "(\\d+)" + "_" + "(\\d+)";
Pattern regex = Pattern.compile(pattern);
for (int i = 0; i < numTries; i++) {
listener.waitForNewLeader();
String address = listener.getAddress();
Matcher m = regex.matcher(address);
if (m.find()) {
int index = Integer.parseInt(m.group(1));
int lastTry = Integer.parseInt(m.group(2));
assertEquals(
listener.getLeaderSessionID(), contenders[index].getLeaderSessionID());
leaderElectionService[index].stop();
leaderElectionService[index] =
ZooKeeperUtils.createLeaderElectionService(
curatorFrameworkWrapper.asCuratorFramework());
contenders[index] =
new TestingContender(
LEADER_ADDRESS + "_" + index + "_" + (lastTry + 1),
leaderElectionService[index]);
leaderElectionService[index].start(contenders[index]);
} else {
throw new Exception("Did not find the leader's index.");
}
}
} finally {
if (leaderRetrievalService != null) {
leaderRetrievalService.stop();
}
for (DefaultLeaderElectionService electionService : leaderElectionService) {
if (electionService != null) {
electionService.stop();
}
}
}
}
/**
* Tests that the current leader is notified when its leader connection information in ZooKeeper
* is overwritten. The leader must re-establish the correct leader connection information in
* ZooKeeper.
*/
@Test
public void testLeaderShouldBeCorrectedWhenOverwritten() throws Exception {
final String faultyContenderUrl = "faultyContender";
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingLeaderRetrievalEventHandler retrievalEventHandler =
new TestingLeaderRetrievalEventHandler();
ZooKeeperLeaderElectionDriver leaderElectionDriver = null;
LeaderRetrievalDriver leaderRetrievalDriver = null;
CuratorFrameworkWithUnhandledErrorListener anotherCuratorFrameworkWrapper = null;
try {
leaderElectionDriver =
createAndInitLeaderElectionDriver(
curatorFrameworkWrapper.asCuratorFramework(), electionEventHandler);
electionEventHandler.waitForLeader();
final LeaderInformation confirmedLeaderInformation =
electionEventHandler.getConfirmedLeaderInformation();
assertThat(confirmedLeaderInformation.getLeaderAddress(), is(LEADER_ADDRESS));
anotherCuratorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(
configuration, NoOpFatalErrorHandler.INSTANCE);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(baos);
oos.writeUTF(faultyContenderUrl);
oos.writeObject(UUID.randomUUID());
oos.close();
boolean dataWritten = false;
final String connectionInformationPath =
leaderElectionDriver.getConnectionInformationPath();
while (!dataWritten) {
anotherCuratorFrameworkWrapper
.asCuratorFramework()
.delete()
.forPath(connectionInformationPath);
try {
anotherCuratorFrameworkWrapper
.asCuratorFramework()
.create()
.forPath(connectionInformationPath, baos.toByteArray());
dataWritten = true;
} catch (KeeperException.NodeExistsException e) {
// lost the race: the leader re-created the node between our delete and create, so retry
}
}
leaderRetrievalDriver =
ZooKeeperUtils.createLeaderRetrievalDriverFactory(
curatorFrameworkWrapper.asCuratorFramework())
.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
if (retrievalEventHandler.waitForNewLeader().equals(faultyContenderUrl)) {
retrievalEventHandler.waitForNewLeader();
}
assertThat(
retrievalEventHandler.getLeaderSessionID(),
is(confirmedLeaderInformation.getLeaderSessionID()));
assertThat(
retrievalEventHandler.getAddress(),
is(confirmedLeaderInformation.getLeaderAddress()));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
if (leaderRetrievalDriver != null) {
leaderRetrievalDriver.close();
}
if (anotherCuratorFrameworkWrapper != null) {
anotherCuratorFrameworkWrapper.close();
}
}
}
/**
* Test that errors in the {@link LeaderElectionDriver} are correctly forwarded to the {@link
* LeaderContender}.
*/
@Test
public void testExceptionForwarding() throws Exception {
LeaderElectionDriver leaderElectionDriver = null;
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
CuratorFramework client = null;
final CreateBuilder mockCreateBuilder =
mock(CreateBuilder.class, Mockito.RETURNS_DEEP_STUBS);
final String exMsg = "Test exception";
final Exception testException = new Exception(exMsg);
final CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(configuration, NoOpFatalErrorHandler.INSTANCE);
try {
client = spy(curatorFrameworkWrapper.asCuratorFramework());
doAnswer(invocation -> mockCreateBuilder).when(client).create();
when(mockCreateBuilder
.creatingParentsIfNeeded()
.withMode(Matchers.any(CreateMode.class))
.forPath(anyString(), any(byte[].class)))
.thenThrow(testException);
leaderElectionDriver = createAndInitLeaderElectionDriver(client, electionEventHandler);
electionEventHandler.waitForError();
assertNotNull(electionEventHandler.getError());
assertThat(
ExceptionUtils.findThrowableWithMessage(electionEventHandler.getError(), exMsg)
.isPresent(),
is(true));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
if (curatorFrameworkWrapper != null) {
curatorFrameworkWrapper.close();
}
}
}
/**
* Tests that there is no information left in the ZooKeeper cluster after the ZooKeeper client
* has terminated. In other words, checks that the ZooKeeperLeaderElection service uses
* ephemeral nodes.
*/
@Test
public void testEphemeralZooKeeperNodes() throws Exception {
ZooKeeperLeaderElectionDriver leaderElectionDriver = null;
LeaderRetrievalDriver leaderRetrievalDriver = null;
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingLeaderRetrievalEventHandler retrievalEventHandler =
new TestingLeaderRetrievalEventHandler();
CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper = null;
CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper2 = null;
NodeCache cache = null;
try {
curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(
configuration, testingFatalErrorHandlerResource.getFatalErrorHandler());
curatorFrameworkWrapper2 =
ZooKeeperUtils.startCuratorFramework(
configuration, testingFatalErrorHandlerResource.getFatalErrorHandler());
leaderElectionDriver =
createAndInitLeaderElectionDriver(
curatorFrameworkWrapper.asCuratorFramework(), electionEventHandler);
leaderRetrievalDriver =
ZooKeeperUtils.createLeaderRetrievalDriverFactory(
curatorFrameworkWrapper2.asCuratorFramework())
.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
cache =
new NodeCache(
curatorFrameworkWrapper2.asCuratorFramework(),
leaderElectionDriver.getConnectionInformationPath());
ExistsCacheListener existsListener = new ExistsCacheListener(cache);
DeletedCacheListener deletedCacheListener = new DeletedCacheListener(cache);
cache.getListenable().addListener(existsListener);
cache.start();
electionEventHandler.waitForLeader();
retrievalEventHandler.waitForNewLeader();
Future<Boolean> existsFuture = existsListener.nodeExists();
existsFuture.get(timeout, TimeUnit.MILLISECONDS);
cache.getListenable().addListener(deletedCacheListener);
leaderElectionDriver.close();
curatorFrameworkWrapper.close();
Future<Boolean> deletedFuture = deletedCacheListener.nodeDeleted();
deletedFuture.get(timeout, TimeUnit.MILLISECONDS);
retrievalEventHandler.waitForEmptyLeaderInformation();
} finally {
electionEventHandler.close();
if (leaderRetrievalDriver != null) {
leaderRetrievalDriver.close();
}
if (cache != null) {
cache.close();
}
if (curatorFrameworkWrapper2 != null) {
curatorFrameworkWrapper2.close();
}
}
}
@Test
public void testNotLeaderShouldNotCleanUpTheLeaderInformation() throws Exception {
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingLeaderRetrievalEventHandler retrievalEventHandler =
new TestingLeaderRetrievalEventHandler();
ZooKeeperLeaderElectionDriver leaderElectionDriver = null;
ZooKeeperLeaderRetrievalDriver leaderRetrievalDriver = null;
try {
leaderElectionDriver =
createAndInitLeaderElectionDriver(
curatorFrameworkWrapper.asCuratorFramework(), electionEventHandler);
electionEventHandler.waitForLeader();
final LeaderInformation confirmedLeaderInformation =
electionEventHandler.getConfirmedLeaderInformation();
assertThat(confirmedLeaderInformation.getLeaderAddress(), is(LEADER_ADDRESS));
leaderElectionDriver.notLeader();
electionEventHandler.waitForRevokeLeader();
assertThat(
electionEventHandler.getConfirmedLeaderInformation(),
is(LeaderInformation.empty()));
leaderRetrievalDriver =
ZooKeeperUtils.createLeaderRetrievalDriverFactory(
curatorFrameworkWrapper.asCuratorFramework())
.createLeaderRetrievalDriver(
retrievalEventHandler, retrievalEventHandler::handleError);
retrievalEventHandler.waitForNewLeader();
assertThat(
retrievalEventHandler.getLeaderSessionID(),
is(confirmedLeaderInformation.getLeaderSessionID()));
assertThat(
retrievalEventHandler.getAddress(),
is(confirmedLeaderInformation.getLeaderAddress()));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
if (leaderRetrievalDriver != null) {
leaderRetrievalDriver.close();
}
}
}
/**
* Test that background errors in the {@link LeaderElectionDriver} are correctly forwarded to
* the {@link FatalErrorHandler}.
*/
@Test
public void testUnExpectedErrorForwarding() throws Exception {
LeaderElectionDriver leaderElectionDriver = null;
final TestingLeaderElectionEventHandler electionEventHandler =
new TestingLeaderElectionEventHandler(LEADER_ADDRESS);
final TestingFatalErrorHandler fatalErrorHandler = new TestingFatalErrorHandler();
final FlinkRuntimeException testException =
new FlinkRuntimeException("testUnExpectedErrorForwarding");
final CuratorFrameworkFactory.Builder curatorFrameworkBuilder =
CuratorFrameworkFactory.builder()
.connectString(testingServer.getConnectString())
.retryPolicy(new ExponentialBackoffRetry(1, 0))
.aclProvider(
new ACLProvider() {
@Override
public List<ACL> getDefaultAcl() {
throw testException;
}
@Override
public List<ACL> getAclForPath(String s) {
throw testException;
}
})
.namespace("flink");
try (CuratorFrameworkWithUnhandledErrorListener curatorFrameworkWrapper =
ZooKeeperUtils.startCuratorFramework(curatorFrameworkBuilder, fatalErrorHandler)) {
CuratorFramework clientWithErrorHandler = curatorFrameworkWrapper.asCuratorFramework();
assertFalse(fatalErrorHandler.getErrorFuture().isDone());
leaderElectionDriver =
createAndInitLeaderElectionDriver(clientWithErrorHandler, electionEventHandler);
assertThat(
fatalErrorHandler.getErrorFuture().join(),
FlinkMatchers.containsCause(testException));
} finally {
electionEventHandler.close();
if (leaderElectionDriver != null) {
leaderElectionDriver.close();
}
}
}
private static class ExistsCacheListener implements NodeCacheListener {
final CompletableFuture<Boolean> existsPromise = new CompletableFuture<>();
final NodeCache cache;
public ExistsCacheListener(final NodeCache cache) {
this.cache = cache;
}
public Future<Boolean> nodeExists() {
return existsPromise;
}
@Override
public void nodeChanged() throws Exception {
ChildData data = cache.getCurrentData();
if (data != null && !existsPromise.isDone()) {
existsPromise.complete(true);
cache.getListenable().removeListener(this);
}
}
}
private static class DeletedCacheListener implements NodeCacheListener {
final CompletableFuture<Boolean> deletedPromise = new CompletableFuture<>();
final NodeCache cache;
public DeletedCacheListener(final NodeCache cache) {
this.cache = cache;
}
public Future<Boolean> nodeDeleted() {
return deletedPromise;
}
@Override
public void nodeChanged() throws Exception {
ChildData data = cache.getCurrentData();
if (data == null && !deletedPromise.isDone()) {
deletedPromise.complete(true);
cache.getListenable().removeListener(this);
}
}
}
private ZooKeeperLeaderElectionDriver createAndInitLeaderElectionDriver(
CuratorFramework client, TestingLeaderElectionEventHandler electionEventHandler)
throws Exception {
final ZooKeeperLeaderElectionDriver leaderElectionDriver =
ZooKeeperUtils.createLeaderElectionDriverFactory(client)
.createLeaderElectionDriver(
electionEventHandler,
electionEventHandler::handleError,
LEADER_ADDRESS);
electionEventHandler.init(leaderElectionDriver);
return leaderElectionDriver;
}
}
|
Oh, that's right, I'll update.
|
public boolean tryProcessProfileAsync(Consumer<Boolean> task) {
if (executionDAG.getExecutions().isEmpty()) {
return false;
}
if (!jobSpec.isNeedReport()) {
return false;
}
boolean enableAsyncProfile = true;
if (connectContext != null && connectContext.getSessionVariable() != null) {
enableAsyncProfile = connectContext.getSessionVariable().isEnableAsyncProfile();
}
TUniqueId queryId = null;
if (connectContext != null) {
queryId = connectContext.getExecutionId();
}
if (!enableAsyncProfile || !queryProfile.addListener(task)) {
LOG.warn("Profile task is full, execute in sync mode, query id = {}", DebugUtil.printId(queryId));
collectProfileSync();
task.accept(false);
return false;
}
return true;
}
|
LOG.warn("Profile task is full, execute in sync mode, query id = {}", DebugUtil.printId(queryId));
|
public boolean tryProcessProfileAsync(Consumer<Boolean> task) {
if (executionDAG.getExecutions().isEmpty()) {
return false;
}
if (!jobSpec.isNeedReport()) {
return false;
}
boolean enableAsyncProfile = true;
if (connectContext != null && connectContext.getSessionVariable() != null) {
enableAsyncProfile = connectContext.getSessionVariable().isEnableAsyncProfile();
}
TUniqueId queryId = null;
if (connectContext != null) {
queryId = connectContext.getExecutionId();
}
if (!enableAsyncProfile || !queryProfile.addListener(task)) {
if (enableAsyncProfile) {
LOG.info("Profile task is full, execute in sync mode, query id = {}", DebugUtil.printId(queryId));
}
collectProfileSync();
task.accept(false);
return false;
}
return true;
}
|
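The revision above narrows the message to the one case worth reporting: async profiling was enabled but the listener queue was full. When async profiling is simply disabled, falling back to synchronous collection is expected and should stay silent, which is also why the level drops from warn to info. A standalone sketch of that distinction (all names here are invented for illustration):
```java
public class ProfileFallbackDemo {
    // Returns true if the async path was taken; logs only when async was attempted and rejected.
    static boolean tryAsync(boolean enableAsyncProfile, boolean queueHasRoom) {
        boolean asyncRejected = enableAsyncProfile && !queueHasRoom;
        if (asyncRejected) {
            System.out.println("Profile task is full, execute in sync mode");
        }
        return enableAsyncProfile && queueHasRoom; // false => caller collects synchronously
    }

    public static void main(String[] args) {
        tryAsync(false, true);  // feature disabled: silent sync fallback
        tryAsync(true, false);  // enabled but queue full: logged sync fallback
        tryAsync(true, true);   // async path taken
    }
}
```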
class Factory implements Coordinator.Factory {
@Override
public DefaultCoordinator createQueryScheduler(ConnectContext context, List<PlanFragment> fragments,
List<ScanNode> scanNodes,
TDescriptorTable descTable) {
JobSpec jobSpec =
JobSpec.Factory.fromQuerySpec(context, fragments, scanNodes, descTable, TQueryType.SELECT);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createInsertScheduler(ConnectContext context, List<PlanFragment> fragments,
List<ScanNode> scanNodes,
TDescriptorTable descTable) {
JobSpec jobSpec = JobSpec.Factory.fromQuerySpec(context, fragments, scanNodes, descTable, TQueryType.LOAD);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createBrokerLoadScheduler(LoadPlanner loadPlanner) {
ConnectContext context = loadPlanner.getContext();
JobSpec jobSpec = JobSpec.Factory.fromBrokerLoadJobSpec(loadPlanner);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createStreamLoadScheduler(LoadPlanner loadPlanner) {
ConnectContext context = loadPlanner.getContext();
JobSpec jobSpec = JobSpec.Factory.fromStreamLoadJobSpec(loadPlanner);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createSyncStreamLoadScheduler(StreamLoadPlanner planner, TNetworkAddress address) {
JobSpec jobSpec = JobSpec.Factory.fromSyncStreamLoadSpec(planner);
return new DefaultCoordinator(jobSpec, planner, address);
}
@Override
public DefaultCoordinator createBrokerExportScheduler(Long jobId, TUniqueId queryId, DescriptorTable descTable,
List<PlanFragment> fragments, List<ScanNode> scanNodes,
String timezone,
long startTime, Map<String, String> sessionVariables,
long execMemLimit) {
ConnectContext context = new ConnectContext();
context.setQualifiedUser(AuthenticationMgr.ROOT_USER);
context.setCurrentUserIdentity(UserIdentity.ROOT);
context.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID));
context.getSessionVariable().setEnablePipelineEngine(true);
context.getSessionVariable().setPipelineDop(0);
JobSpec jobSpec = JobSpec.Factory.fromBrokerExportSpec(context, jobId, queryId, descTable,
fragments, scanNodes, timezone,
startTime, sessionVariables, execMemLimit);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createNonPipelineBrokerLoadScheduler(Long jobId, TUniqueId queryId,
DescriptorTable descTable,
List<PlanFragment> fragments,
List<ScanNode> scanNodes,
String timezone,
long startTime,
Map<String, String> sessionVariables,
ConnectContext context, long execMemLimit) {
JobSpec jobSpec = JobSpec.Factory.fromNonPipelineBrokerLoadJobSpec(context, jobId, queryId, descTable,
fragments, scanNodes, timezone,
startTime, sessionVariables, execMemLimit);
return new DefaultCoordinator(context, jobSpec);
}
}
|
class Factory implements Coordinator.Factory {
@Override
public DefaultCoordinator createQueryScheduler(ConnectContext context, List<PlanFragment> fragments,
List<ScanNode> scanNodes,
TDescriptorTable descTable) {
JobSpec jobSpec =
JobSpec.Factory.fromQuerySpec(context, fragments, scanNodes, descTable, TQueryType.SELECT);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createInsertScheduler(ConnectContext context, List<PlanFragment> fragments,
List<ScanNode> scanNodes,
TDescriptorTable descTable) {
JobSpec jobSpec = JobSpec.Factory.fromQuerySpec(context, fragments, scanNodes, descTable, TQueryType.LOAD);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createBrokerLoadScheduler(LoadPlanner loadPlanner) {
ConnectContext context = loadPlanner.getContext();
JobSpec jobSpec = JobSpec.Factory.fromBrokerLoadJobSpec(loadPlanner);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createStreamLoadScheduler(LoadPlanner loadPlanner) {
ConnectContext context = loadPlanner.getContext();
JobSpec jobSpec = JobSpec.Factory.fromStreamLoadJobSpec(loadPlanner);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createSyncStreamLoadScheduler(StreamLoadPlanner planner, TNetworkAddress address) {
JobSpec jobSpec = JobSpec.Factory.fromSyncStreamLoadSpec(planner);
return new DefaultCoordinator(jobSpec, planner, address);
}
@Override
public DefaultCoordinator createBrokerExportScheduler(Long jobId, TUniqueId queryId, DescriptorTable descTable,
List<PlanFragment> fragments, List<ScanNode> scanNodes,
String timezone,
long startTime, Map<String, String> sessionVariables,
long execMemLimit) {
ConnectContext context = new ConnectContext();
context.setQualifiedUser(AuthenticationMgr.ROOT_USER);
context.setCurrentUserIdentity(UserIdentity.ROOT);
context.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID));
context.getSessionVariable().setEnablePipelineEngine(true);
context.getSessionVariable().setPipelineDop(0);
JobSpec jobSpec = JobSpec.Factory.fromBrokerExportSpec(context, jobId, queryId, descTable,
fragments, scanNodes, timezone,
startTime, sessionVariables, execMemLimit);
return new DefaultCoordinator(context, jobSpec);
}
@Override
public DefaultCoordinator createNonPipelineBrokerLoadScheduler(Long jobId, TUniqueId queryId,
DescriptorTable descTable,
List<PlanFragment> fragments,
List<ScanNode> scanNodes,
String timezone,
long startTime,
Map<String, String> sessionVariables,
ConnectContext context, long execMemLimit) {
JobSpec jobSpec = JobSpec.Factory.fromNonPipelineBrokerLoadJobSpec(context, jobId, queryId, descTable,
fragments, scanNodes, timezone,
startTime, sessionVariables, execMemLimit);
return new DefaultCoordinator(context, jobSpec);
}
}
|
Can be replaced with ``` Set<String> notFoundNodes = new HashSet<>(hostnames); notFoundNodes.removeAll(nodes); ```
|
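One subtlety in the suggestion as written: notFoundNodes.removeAll(nodes) feeds Node objects to a Set of Strings. Set.removeAll(Collection<?>) compiles either way but silently removes nothing when the element types differ, so the nodes must be mapped to hostnames first, as the revised method below does. A self-contained sketch of the set-difference approach (Node here is a stand-in record):
```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class NotFoundDemo {
    record Node(String hostname) {}

    public static void main(String[] args) {
        List<String> hostnames = List.of("a", "b", "c");
        List<Node> nodes = List.of(new Node("a"));
        Set<String> notFoundNodes = new HashSet<>(hostnames);
        // Map to hostnames before removeAll; removing Node objects would be a silent no-op.
        notFoundNodes.removeAll(nodes.stream().map(Node::hostname).collect(Collectors.toSet()));
        System.out.println(notFoundNodes); // [b, c] (order unspecified for HashSet)
    }
}
```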
public List<Node> nodesFromHostnames(List<String> hostnames) {
List<Node> nodes = hostnames.stream()
.filter(h -> nodeMap.containsKey(h))
.map(h -> nodeMap.get(h))
.collect(Collectors.toList());
if (nodes.size() != hostnames.size()) {
List<String> notFoundNodes = hostnames.stream()
.filter(h -> !nodes.stream()
.map(Node::hostname).collect(Collectors.toSet()).contains(h))
.collect(Collectors.toList());
throw new IllegalArgumentException(String.format("Host(s) not found: [ %s ]",
String.join(", ", notFoundNodes)));
}
return nodes;
}
|
List<String> notFoundNodes = hostnames.stream()
|
public List<Node> nodesFromHostnames(List<String> hostnames) {
List<Node> nodes = hostnames.stream()
.filter(h -> nodeMap.containsKey(h))
.map(h -> nodeMap.get(h))
.collect(Collectors.toList());
if (nodes.size() != hostnames.size()) {
Set<String> notFoundNodes = new HashSet<>(hostnames);
notFoundNodes.removeAll(nodes.stream().map(Node::hostname).collect(Collectors.toList()));
throw new IllegalArgumentException(String.format("Host(s) not found: [ %s ]",
String.join(", ", notFoundNodes)));
}
return nodes;
}
|
class CapacityChecker {
private List<Node> hosts;
Map<String, Node> nodeMap;
private Map<Node, List<Node>> nodeChildren;
private Map<Node, AllocationResources> availableResources;
public AllocationHistory allocationHistory = null;
public CapacityChecker(NodeRepository nodeRepository) {
this.hosts = getHosts(nodeRepository);
List<Node> tenants = getTenants(nodeRepository, hosts);
nodeMap = constructHostnameToNodeMap(hosts);
this.nodeChildren = constructNodeChildrenMap(tenants, hosts, nodeMap);
this.availableResources = constructAvailableResourcesMap(hosts, nodeChildren);
}
public List<Node> getHosts() {
return hosts;
}
public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() {
Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(hosts, nodeChildren, availableResources);
return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved, hosts, nodeChildren, availableResources);
}
protected List<Node> findOvercommittedHosts() {
return findOvercommittedNodes(availableResources);
}
public Optional<HostFailurePath> findHostRemovalFailure(List<Node> hostsToRemove) {
var removal = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources);
if (removal.isEmpty()) return Optional.empty();
HostFailurePath failurePath = new HostFailurePath();
failurePath.hostsCausingFailure = hostsToRemove;
failurePath.failureReason = removal.get();
return Optional.of(failurePath);
}
private static Node.State[] relevantNodeStates = {
Node.State.active,
Node.State.inactive,
Node.State.dirty,
Node.State.provisioned,
Node.State.ready,
Node.State.reserved
};
private List<Node> getHosts(NodeRepository nodeRepository) {
return nodeRepository.getNodes(NodeType.host, relevantNodeStates);
}
private List<Node> getTenants(NodeRepository nodeRepository, List<Node> hosts) {
var parentNames = hosts.stream().map(Node::hostname).collect(Collectors.toSet());
return nodeRepository.getNodes(NodeType.tenant, relevantNodeStates).stream()
.filter(t -> parentNames.contains(t.parentHostname().orElse("")))
.collect(Collectors.toList());
}
private Optional<HostFailurePath> greedyHeuristicFindFailurePath(Map<Node, Integer> heuristic, List<Node> hosts,
Map<Node, List<Node>> nodeChildren,
Map<Node, AllocationResources> availableResources) {
if (hosts.size() == 0) return Optional.empty();
List<Node> parentRemovalPriorityList = heuristic.entrySet().stream()
.sorted(Comparator.comparingInt(Map.Entry::getValue))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
for (int i = 1; i <= parentRemovalPriorityList.size(); i++) {
List<Node> hostsToRemove = parentRemovalPriorityList.subList(0, i);
var hostRemovalFailure = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources);
if (hostRemovalFailure.isPresent()) {
HostFailurePath failurePath = new HostFailurePath();
failurePath.hostsCausingFailure = hostsToRemove;
failurePath.failureReason = hostRemovalFailure.get();
return Optional.of(failurePath);
}
}
throw new IllegalStateException("No path to failure found. This should be impossible!");
}
private Map<String, Node> constructHostnameToNodeMap(List<Node> nodes) {
return nodes.stream().collect(Collectors.toMap(Node::hostname, n -> n));
}
private Map<Node, List<Node>> constructNodeChildrenMap(List<Node> tenants, List<Node> hosts, Map<String, Node> hostnameToNode) {
Map<Node, List<Node>> nodeChildren = tenants.stream()
.filter(n -> n.parentHostname().isPresent())
.filter(n -> hostnameToNode.containsKey(n.parentHostname().get()))
.collect(Collectors.groupingBy(
n -> hostnameToNode.get(n.parentHostname().orElseThrow())));
for (var host : hosts) nodeChildren.putIfAbsent(host, List.of());
return nodeChildren;
}
private Map<Node, AllocationResources> constructAvailableResourcesMap(List<Node> hosts, Map<Node, List<Node>> nodeChildren) {
Map<Node, AllocationResources> availableResources = new HashMap<>();
for (var host : hosts) {
NodeResources hostResources = host.flavor().resources();
int occupiedIps = 0;
Set<String> ipPool = host.ipAddressPool().asSet();
for (var child : nodeChildren.get(host)) {
hostResources = hostResources.subtract(child.flavor().resources().withDiskSpeed(NodeResources.DiskSpeed.any));
occupiedIps += child.ipAddresses().stream().filter(ipPool::contains).count();
}
availableResources.put(host, new AllocationResources(hostResources, host.ipAddressPool().asSet().size() - occupiedIps));
}
return availableResources;
}
/**
* Computes a heuristic for each host, with a lower score indicating a higher perceived likelihood that removing
* the host causes an unrecoverable state
*/
private Map<Node, Integer> computeMaximalRepeatedRemovals(List<Node> hosts, Map<Node, List<Node>> nodeChildren,
Map<Node, AllocationResources> availableResources) {
Map<Node, Integer> timesNodeCanBeRemoved = hosts.stream().collect(Collectors.toMap(
Function.identity(),
_x -> Integer.MAX_VALUE
));
for (Node host : hosts) {
List<Node> children = nodeChildren.get(host);
if (children.size() == 0) continue;
Map<Node, AllocationResources> resourceMap = new HashMap<>(availableResources);
Map<Node, List<Allocation>> containedAllocations = collateAllocations(nodeChildren);
int timesHostCanBeRemoved = 0;
Optional<Node> unallocatedNode;
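            // Repeatedly re-allocate this host's children using the shared resource map;
            // each fully successful round means the host could be "removed" once more,
            // so the count measures spare headroom for this host's children (capped at 1000).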
while (timesHostCanBeRemoved < 1000) {
unallocatedNode = tryAllocateNodes(nodeChildren.get(host), hosts, resourceMap, containedAllocations);
if (unallocatedNode.isEmpty()) {
timesHostCanBeRemoved++;
} else break;
}
timesNodeCanBeRemoved.put(host, timesHostCanBeRemoved);
}
return timesNodeCanBeRemoved;
}
private List<Node> findOvercommittedNodes(Map<Node, AllocationResources> availableResources) {
List<Node> overcommittedNodes = new ArrayList<>();
for (var entry : availableResources.entrySet()) {
var resources = entry.getValue().nodeResources;
if (resources.vcpu() < 0 || resources.memoryGb() < 0 || resources.diskGb() < 0) {
overcommittedNodes.add(entry.getKey());
}
}
return overcommittedNodes;
}
private Map<Node, List<Allocation>> collateAllocations(Map<Node, List<Node>> nodeChildren) {
return nodeChildren.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> e.getValue().stream()
.map(Node::allocation).flatMap(Optional::stream)
.collect(Collectors.toList())
));
}
/**
* Tests whether it's possible to remove the provided hosts.
* Does not mutate any input variable.
* @return Empty optional if removal is possible, information on what caused the failure otherwise
*/
private Optional<HostRemovalFailure> findHostRemovalFailure(List<Node> hostsToRemove, List<Node> allHosts,
Map<Node, List<Node>> nodechildren,
Map<Node, AllocationResources> availableResources) {
var containedAllocations = collateAllocations(nodechildren);
var resourceMap = new HashMap<>(availableResources);
List<Node> validAllocationTargets = allHosts.stream()
.filter(h -> !hostsToRemove.contains(h))
.collect(Collectors.toList());
if (validAllocationTargets.size() == 0) {
return Optional.of(HostRemovalFailure.none());
}
allocationHistory = new AllocationHistory();
for (var host : hostsToRemove) {
Optional<Node> unallocatedNode = tryAllocateNodes(nodechildren.get(host),
validAllocationTargets, resourceMap, containedAllocations, true);
if (unallocatedNode.isPresent()) {
AllocationFailureReasonList failures = collateAllocationFailures(unallocatedNode.get(),
validAllocationTargets, resourceMap, containedAllocations);
return Optional.of(HostRemovalFailure.create(host, unallocatedNode.get(), failures));
}
}
return Optional.empty();
}
/**
* Attempts to allocate the listed nodes to a new host, mutating availableResources and containedAllocations,
* optionally returning the first node to fail, if one does.
* */
private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
return tryAllocateNodes(nodes, hosts, availableResources, containedAllocations, false);
}
private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations, boolean withHistory) {
for (var node : nodes) {
var newParent = tryAllocateNode(node, hosts, availableResources, containedAllocations);
if (newParent.isEmpty()) {
if (withHistory) allocationHistory.addEntry(node, null, 0);
return Optional.of(node);
}
if (withHistory) {
long eligibleParents =
hosts.stream().filter(h ->
!violatesParentHostPolicy(node, h, containedAllocations)
&& availableResources.get(h).satisfies(AllocationResources.from(node.flavor().resources()))).count();
allocationHistory.addEntry(node, newParent.get(), eligibleParents + 1);
}
}
return Optional.empty();
}
/**
* @return The parent to which the node was allocated, if it was successfully allocated.
*/
private Optional<Node> tryAllocateNode(Node node, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
AllocationResources requiredNodeResources = AllocationResources.from(node.flavor().resources());
for (var host : hosts) {
var availableHostResources = availableResources.get(host);
if (violatesParentHostPolicy(node, host, containedAllocations)) {
continue;
}
if (availableHostResources.satisfies(requiredNodeResources)) {
availableResources.put(host, availableHostResources.subtract(requiredNodeResources));
if (node.allocation().isPresent()) {
containedAllocations.get(host).add(node.allocation().get());
}
return Optional.of(host);
}
}
return Optional.empty();
}
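    // Parent host policy: a tenant node may not share a host with another node from
    // the same cluster of the same application (owner).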
private static boolean violatesParentHostPolicy(Node node, Node host, Map<Node, List<Allocation>> containedAllocations) {
if (node.allocation().isEmpty()) return false;
Allocation nodeAllocation = node.allocation().get();
for (var allocation : containedAllocations.get(host)) {
if (allocation.membership().cluster().equalsIgnoringGroupAndVespaVersion(nodeAllocation.membership().cluster())
&& allocation.owner().equals(nodeAllocation.owner())) {
return true;
}
}
return false;
}
private AllocationFailureReasonList collateAllocationFailures(Node node, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
List<AllocationFailureReason> allocationFailureReasons = new ArrayList<>();
for (var host : hosts) {
AllocationFailureReason reason = new AllocationFailureReason(host);
var availableHostResources = availableResources.get(host);
reason.violatesParentHostPolicy = violatesParentHostPolicy(node, host, containedAllocations);
NodeResources l = availableHostResources.nodeResources;
NodeResources r = node.flavor().resources();
if (l.vcpu() < r.vcpu()) { reason.insufficientVcpu = true; }
if (l.memoryGb() < r.memoryGb()) { reason.insufficientMemoryGb = true; }
if (l.diskGb() < r.diskGb()) { reason.insufficientDiskGb = true; }
if (r.diskSpeed() != NodeResources.DiskSpeed.any && r.diskSpeed() != l.diskSpeed())
{ reason.incompatibleDiskSpeed = true; }
if (availableHostResources.availableIPs < 1) { reason.insufficientAvailableIPs = true; }
allocationFailureReasons.add(reason);
}
return new AllocationFailureReasonList(allocationFailureReasons);
}
/**
* Contains the list of hosts that, upon being removed, caused an unrecoverable state,
* as well as the specific host and tenant which caused it.
*/
public static class HostFailurePath {
public List<Node> hostsCausingFailure;
public HostRemovalFailure failureReason;
}
/**
* Data class used for detailing why removing the given tenant from the given host was unsuccessful.
* A failure might not be caused by failing to allocate a specific tenant, in which case the fields
* will be empty.
*/
public static class HostRemovalFailure {
public Optional<Node> host;
public Optional<Node> tenant;
public AllocationFailureReasonList failureReasons;
public static HostRemovalFailure none() {
return new HostRemovalFailure(
Optional.empty(),
Optional.empty(),
new AllocationFailureReasonList(List.of()));
}
public static HostRemovalFailure create(Node host, Node tenant, AllocationFailureReasonList failureReasons) {
return new HostRemovalFailure(
Optional.of(host),
Optional.of(tenant),
failureReasons);
}
private HostRemovalFailure(Optional<Node> host, Optional<Node> tenant, AllocationFailureReasonList failureReasons) {
this.host = host;
this.tenant = tenant;
this.failureReasons = failureReasons;
}
@Override
public String toString() {
            if (host.isEmpty() || tenant.isEmpty()) return "No removal candidates exist.";
return String.format(
"Failure to remove host %s" +
"\n\tNo new host found for tenant %s:" +
"\n\t\tSingular Reasons: %s" +
"\n\t\tTotal Reasons: %s",
this.host.get().hostname(),
this.tenant.get().hostname(),
this.failureReasons.singularReasonFailures().toString(),
this.failureReasons.toString()
);
}
}
/**
* Used to describe the resources required for a tenant, and available to a host.
*/
private static class AllocationResources {
NodeResources nodeResources;
int availableIPs;
public static AllocationResources from(NodeResources nodeResources) {
return new AllocationResources(nodeResources, 1);
}
public AllocationResources(NodeResources nodeResources, int availableIPs) {
this.nodeResources = nodeResources;
this.availableIPs = availableIPs;
}
public boolean satisfies(AllocationResources other) {
if (!this.nodeResources.satisfies(other.nodeResources)) return false;
return this.availableIPs >= other.availableIPs;
}
public AllocationResources subtract(AllocationResources other) {
return new AllocationResources(this.nodeResources.subtract(other.nodeResources), this.availableIPs - other.availableIPs);
}
}
/**
* Keeps track of the reason why a host rejected an allocation.
*/
private static class AllocationFailureReason {
Node host;
        public AllocationFailureReason(Node host) {
this.host = host;
}
public boolean insufficientVcpu = false;
public boolean insufficientMemoryGb = false;
public boolean insufficientDiskGb = false;
public boolean incompatibleDiskSpeed = false;
public boolean insufficientAvailableIPs = false;
public boolean violatesParentHostPolicy = false;
public int numberOfReasons() {
int n = 0;
if (insufficientVcpu) n++;
if (insufficientMemoryGb) n++;
if (insufficientDiskGb) n++;
if (incompatibleDiskSpeed) n++;
if (insufficientAvailableIPs) n++;
if (violatesParentHostPolicy) n++;
return n;
}
@Override
public String toString() {
List<String> reasons = new ArrayList<>();
if (insufficientVcpu) reasons.add("insufficientVcpu");
if (insufficientMemoryGb) reasons.add("insufficientMemoryGb");
if (insufficientDiskGb) reasons.add("insufficientDiskGb");
if (incompatibleDiskSpeed) reasons.add("incompatibleDiskSpeed");
if (insufficientAvailableIPs) reasons.add("insufficientAvailableIPs");
if (violatesParentHostPolicy) reasons.add("violatesParentHostPolicy");
return String.format("[%s]", String.join(", ", reasons));
}
}
/**
* Provides convenient methods for tallying failures.
*/
public static class AllocationFailureReasonList {
private List<AllocationFailureReason> allocationFailureReasons;
public AllocationFailureReasonList(List<AllocationFailureReason> allocationFailureReasons) {
this.allocationFailureReasons = allocationFailureReasons;
}
public long insufficientVcpu() { return allocationFailureReasons.stream().filter(r -> r.insufficientVcpu).count(); }
public long insufficientMemoryGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientMemoryGb).count(); }
public long insufficientDiskGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientDiskGb).count(); }
public long incompatibleDiskSpeed() { return allocationFailureReasons.stream().filter(r -> r.incompatibleDiskSpeed).count(); }
public long insufficientAvailableIps() { return allocationFailureReasons.stream().filter(r -> r.insufficientAvailableIPs).count(); }
public long violatesParentHostPolicy() { return allocationFailureReasons.stream().filter(r -> r.violatesParentHostPolicy).count(); }
public AllocationFailureReasonList singularReasonFailures() {
return new AllocationFailureReasonList(allocationFailureReasons.stream()
.filter(reason -> reason.numberOfReasons() == 1).collect(Collectors.toList()));
}
public AllocationFailureReasonList multipleReasonFailures() {
return new AllocationFailureReasonList(allocationFailureReasons.stream()
.filter(reason -> reason.numberOfReasons() > 1).collect(Collectors.toList()));
}
public long size() {
return allocationFailureReasons.size();
}
@Override
public String toString() {
return String.format("CPU (%3d), Memory (%3d), Disk size (%3d), Disk speed (%3d), IP (%3d), Parent-Host Policy (%3d)",
insufficientVcpu(), insufficientMemoryGb(), insufficientDiskGb(),
incompatibleDiskSpeed(), insufficientAvailableIps(), violatesParentHostPolicy());
}
}
public static class AllocationHistory {
public static class Entry {
public Node tenant;
public Node newParent;
public long eligibleParents;
public Entry(Node tenant, Node newParent, long eligibleParents) {
this.tenant = tenant;
this.newParent = newParent;
this.eligibleParents = eligibleParents;
}
@Override
public String toString() {
return String.format("%-20s %-65s -> %15s [%3d valid]",
tenant.hostname().replaceFirst("\\..+", ""),
tenant.flavor().resources(),
newParent == null ? "x" : newParent.hostname().replaceFirst("\\..+", ""),
this.eligibleParents
);
}
}
public List<Entry> historyEntries;
public AllocationHistory() {
this.historyEntries = new ArrayList<>();
}
public void addEntry(Node tenant, Node newParent, long eligibleParents) {
this.historyEntries.add(new Entry(tenant, newParent, eligibleParents));
}
public Set<String> oldParents() {
Set<String> oldParents = new HashSet<>();
for (var entry : historyEntries)
entry.tenant.parentHostname().ifPresent(oldParents::add);
return oldParents;
}
@Override
public String toString() {
StringBuilder out = new StringBuilder();
String currentParent = "";
for (var entry : historyEntries) {
String parentName = entry.tenant.parentHostname().orElseThrow();
if (!parentName.equals(currentParent)) {
currentParent = parentName;
out.append(parentName).append("\n");
}
out.append(entry.toString()).append("\n");
}
return out.toString();
}
}
}
|
class CapacityChecker {
private List<Node> hosts;
Map<String, Node> nodeMap;
private Map<Node, List<Node>> nodeChildren;
private Map<Node, AllocationResources> availableResources;
public AllocationHistory allocationHistory = null;
public CapacityChecker(NodeRepository nodeRepository) {
this.hosts = getHosts(nodeRepository);
List<Node> tenants = getTenants(nodeRepository, hosts);
nodeMap = constructHostnameToNodeMap(hosts);
this.nodeChildren = constructNodeChildrenMap(tenants, hosts, nodeMap);
this.availableResources = constructAvailableResourcesMap(hosts, nodeChildren);
}
public List<Node> getHosts() {
return hosts;
}
public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() {
Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(hosts, nodeChildren, availableResources);
return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved, hosts, nodeChildren, availableResources);
}
protected List<Node> findOvercommittedHosts() {
return findOvercommittedNodes(availableResources);
}
public Optional<HostFailurePath> findHostRemovalFailure(List<Node> hostsToRemove) {
var removal = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources);
if (removal.isEmpty()) return Optional.empty();
HostFailurePath failurePath = new HostFailurePath();
failurePath.hostsCausingFailure = hostsToRemove;
failurePath.failureReason = removal.get();
return Optional.of(failurePath);
}
private static Node.State[] relevantNodeStates = {
Node.State.active,
Node.State.inactive,
Node.State.dirty,
Node.State.provisioned,
Node.State.ready,
Node.State.reserved
};
private List<Node> getHosts(NodeRepository nodeRepository) {
return nodeRepository.getNodes(NodeType.host, relevantNodeStates);
}
private List<Node> getTenants(NodeRepository nodeRepository, List<Node> hosts) {
var parentNames = hosts.stream().map(Node::hostname).collect(Collectors.toSet());
return nodeRepository.getNodes(NodeType.tenant, relevantNodeStates).stream()
.filter(t -> parentNames.contains(t.parentHostname().orElse("")))
.collect(Collectors.toList());
}
private Optional<HostFailurePath> greedyHeuristicFindFailurePath(Map<Node, Integer> heuristic, List<Node> hosts,
Map<Node, List<Node>> nodeChildren,
Map<Node, AllocationResources> availableResources) {
if (hosts.size() == 0) return Optional.empty();
List<Node> parentRemovalPriorityList = heuristic.entrySet().stream()
.sorted(Comparator.comparingInt(Map.Entry::getValue))
.map(Map.Entry::getKey)
.collect(Collectors.toList());
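        // Try removing ever-larger prefixes of the priority list (hosts sorted by
        // ascending heuristic score, i.e. most fragile first) until a set of hosts
        // whose removal cannot be recovered from is found.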
for (int i = 1; i <= parentRemovalPriorityList.size(); i++) {
List<Node> hostsToRemove = parentRemovalPriorityList.subList(0, i);
var hostRemovalFailure = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources);
if (hostRemovalFailure.isPresent()) {
HostFailurePath failurePath = new HostFailurePath();
failurePath.hostsCausingFailure = hostsToRemove;
failurePath.failureReason = hostRemovalFailure.get();
return Optional.of(failurePath);
}
}
throw new IllegalStateException("No path to failure found. This should be impossible!");
}
private Map<String, Node> constructHostnameToNodeMap(List<Node> nodes) {
return nodes.stream().collect(Collectors.toMap(Node::hostname, n -> n));
}
private Map<Node, List<Node>> constructNodeChildrenMap(List<Node> tenants, List<Node> hosts, Map<String, Node> hostnameToNode) {
Map<Node, List<Node>> nodeChildren = tenants.stream()
.filter(n -> n.parentHostname().isPresent())
.filter(n -> hostnameToNode.containsKey(n.parentHostname().get()))
.collect(Collectors.groupingBy(
n -> hostnameToNode.get(n.parentHostname().orElseThrow())));
for (var host : hosts) nodeChildren.putIfAbsent(host, List.of());
return nodeChildren;
}
private Map<Node, AllocationResources> constructAvailableResourcesMap(List<Node> hosts, Map<Node, List<Node>> nodeChildren) {
Map<Node, AllocationResources> availableResources = new HashMap<>();
for (var host : hosts) {
NodeResources hostResources = host.flavor().resources();
int occupiedIps = 0;
Set<String> ipPool = host.ipAddressPool().asSet();
for (var child : nodeChildren.get(host)) {
hostResources = hostResources.subtract(child.flavor().resources().withDiskSpeed(NodeResources.DiskSpeed.any));
occupiedIps += child.ipAddresses().stream().filter(ipPool::contains).count();
}
availableResources.put(host, new AllocationResources(hostResources, host.ipAddressPool().asSet().size() - occupiedIps));
}
return availableResources;
}
/**
* Computes a heuristic for each host, with a lower score indicating a higher perceived likelihood that removing
* the host causes an unrecoverable state
*/
private Map<Node, Integer> computeMaximalRepeatedRemovals(List<Node> hosts, Map<Node, List<Node>> nodeChildren,
Map<Node, AllocationResources> availableResources) {
Map<Node, Integer> timesNodeCanBeRemoved = hosts.stream().collect(Collectors.toMap(
Function.identity(),
_x -> Integer.MAX_VALUE
));
for (Node host : hosts) {
List<Node> children = nodeChildren.get(host);
if (children.size() == 0) continue;
Map<Node, AllocationResources> resourceMap = new HashMap<>(availableResources);
Map<Node, List<Allocation>> containedAllocations = collateAllocations(nodeChildren);
int timesHostCanBeRemoved = 0;
Optional<Node> unallocatedNode;
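            // Repeatedly re-allocate this host's children using the shared resource map;
            // each fully successful round means the host could be "removed" once more,
            // so the count measures spare headroom for this host's children (capped at 1000).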
while (timesHostCanBeRemoved < 1000) {
unallocatedNode = tryAllocateNodes(nodeChildren.get(host), hosts, resourceMap, containedAllocations);
if (unallocatedNode.isEmpty()) {
timesHostCanBeRemoved++;
} else break;
}
timesNodeCanBeRemoved.put(host, timesHostCanBeRemoved);
}
return timesNodeCanBeRemoved;
}
private List<Node> findOvercommittedNodes(Map<Node, AllocationResources> availableResources) {
List<Node> overcommittedNodes = new ArrayList<>();
for (var entry : availableResources.entrySet()) {
var resources = entry.getValue().nodeResources;
if (resources.vcpu() < 0 || resources.memoryGb() < 0 || resources.diskGb() < 0) {
overcommittedNodes.add(entry.getKey());
}
}
return overcommittedNodes;
}
private Map<Node, List<Allocation>> collateAllocations(Map<Node, List<Node>> nodeChildren) {
return nodeChildren.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
e -> e.getValue().stream()
.map(Node::allocation).flatMap(Optional::stream)
.collect(Collectors.toList())
));
}
/**
* Tests whether it's possible to remove the provided hosts.
* Does not mutate any input variable.
* @return Empty optional if removal is possible, information on what caused the failure otherwise
*/
private Optional<HostRemovalFailure> findHostRemovalFailure(List<Node> hostsToRemove, List<Node> allHosts,
Map<Node, List<Node>> nodechildren,
Map<Node, AllocationResources> availableResources) {
var containedAllocations = collateAllocations(nodechildren);
var resourceMap = new HashMap<>(availableResources);
List<Node> validAllocationTargets = allHosts.stream()
.filter(h -> !hostsToRemove.contains(h))
.collect(Collectors.toList());
if (validAllocationTargets.size() == 0) {
return Optional.of(HostRemovalFailure.none());
}
allocationHistory = new AllocationHistory();
for (var host : hostsToRemove) {
Optional<Node> unallocatedNode = tryAllocateNodes(nodechildren.get(host),
validAllocationTargets, resourceMap, containedAllocations, true);
if (unallocatedNode.isPresent()) {
AllocationFailureReasonList failures = collateAllocationFailures(unallocatedNode.get(),
validAllocationTargets, resourceMap, containedAllocations);
return Optional.of(HostRemovalFailure.create(host, unallocatedNode.get(), failures));
}
}
return Optional.empty();
}
/**
* Attempts to allocate the listed nodes to a new host, mutating availableResources and containedAllocations,
* optionally returning the first node to fail, if one does.
* */
private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
return tryAllocateNodes(nodes, hosts, availableResources, containedAllocations, false);
}
private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations, boolean withHistory) {
for (var node : nodes) {
var newParent = tryAllocateNode(node, hosts, availableResources, containedAllocations);
if (newParent.isEmpty()) {
if (withHistory) allocationHistory.addEntry(node, null, 0);
return Optional.of(node);
}
if (withHistory) {
long eligibleParents =
hosts.stream().filter(h ->
!violatesParentHostPolicy(node, h, containedAllocations)
&& availableResources.get(h).satisfies(AllocationResources.from(node.flavor().resources()))).count();
allocationHistory.addEntry(node, newParent.get(), eligibleParents + 1);
}
}
return Optional.empty();
}
/**
* @return The parent to which the node was allocated, if it was successfully allocated.
*/
private Optional<Node> tryAllocateNode(Node node, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
AllocationResources requiredNodeResources = AllocationResources.from(node.flavor().resources());
for (var host : hosts) {
var availableHostResources = availableResources.get(host);
if (violatesParentHostPolicy(node, host, containedAllocations)) {
continue;
}
if (availableHostResources.satisfies(requiredNodeResources)) {
availableResources.put(host, availableHostResources.subtract(requiredNodeResources));
if (node.allocation().isPresent()) {
containedAllocations.get(host).add(node.allocation().get());
}
return Optional.of(host);
}
}
return Optional.empty();
}
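    // Parent host policy: a tenant node may not share a host with another node from
    // the same cluster of the same application (owner).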
private static boolean violatesParentHostPolicy(Node node, Node host, Map<Node, List<Allocation>> containedAllocations) {
if (node.allocation().isEmpty()) return false;
Allocation nodeAllocation = node.allocation().get();
for (var allocation : containedAllocations.get(host)) {
if (allocation.membership().cluster().equalsIgnoringGroupAndVespaVersion(nodeAllocation.membership().cluster())
&& allocation.owner().equals(nodeAllocation.owner())) {
return true;
}
}
return false;
}
private AllocationFailureReasonList collateAllocationFailures(Node node, List<Node> hosts,
Map<Node, AllocationResources> availableResources,
Map<Node, List<Allocation>> containedAllocations) {
List<AllocationFailureReason> allocationFailureReasons = new ArrayList<>();
for (var host : hosts) {
AllocationFailureReason reason = new AllocationFailureReason(host);
var availableHostResources = availableResources.get(host);
reason.violatesParentHostPolicy = violatesParentHostPolicy(node, host, containedAllocations);
NodeResources l = availableHostResources.nodeResources;
NodeResources r = node.flavor().resources();
if (l.vcpu() < r.vcpu()) { reason.insufficientVcpu = true; }
if (l.memoryGb() < r.memoryGb()) { reason.insufficientMemoryGb = true; }
if (l.diskGb() < r.diskGb()) { reason.insufficientDiskGb = true; }
if (r.diskSpeed() != NodeResources.DiskSpeed.any && r.diskSpeed() != l.diskSpeed())
{ reason.incompatibleDiskSpeed = true; }
if (availableHostResources.availableIPs < 1) { reason.insufficientAvailableIPs = true; }
allocationFailureReasons.add(reason);
}
return new AllocationFailureReasonList(allocationFailureReasons);
}
/**
* Contains the list of hosts that, upon being removed, caused an unrecoverable state,
* as well as the specific host and tenant which caused it.
*/
public static class HostFailurePath {
public List<Node> hostsCausingFailure;
public HostRemovalFailure failureReason;
}
/**
* Data class used for detailing why removing the given tenant from the given host was unsuccessful.
* A failure might not be caused by failing to allocate a specific tenant, in which case the fields
* will be empty.
*/
public static class HostRemovalFailure {
public Optional<Node> host;
public Optional<Node> tenant;
public AllocationFailureReasonList allocationFailures;
public static HostRemovalFailure none() {
return new HostRemovalFailure(
Optional.empty(),
Optional.empty(),
new AllocationFailureReasonList(List.of()));
}
public static HostRemovalFailure create(Node host, Node tenant, AllocationFailureReasonList failureReasons) {
return new HostRemovalFailure(
Optional.of(host),
Optional.of(tenant),
failureReasons);
}
private HostRemovalFailure(Optional<Node> host, Optional<Node> tenant, AllocationFailureReasonList allocationFailures) {
this.host = host;
this.tenant = tenant;
this.allocationFailures = allocationFailures;
}
@Override
public String toString() {
            if (host.isEmpty() || tenant.isEmpty()) return "No removal candidates exist.";
return String.format(
"Failure to remove host %s" +
"\n\tNo new host found for tenant %s:" +
"\n\t\tSingular Reasons: %s" +
"\n\t\tTotal Reasons: %s",
this.host.get().hostname(),
this.tenant.get().hostname(),
this.allocationFailures.singularReasonFailures().toString(),
this.allocationFailures.toString()
);
}
}
/**
* Used to describe the resources required for a tenant, and available to a host.
*/
private static class AllocationResources {
NodeResources nodeResources;
int availableIPs;
public static AllocationResources from(NodeResources nodeResources) {
return new AllocationResources(nodeResources, 1);
}
public AllocationResources(NodeResources nodeResources, int availableIPs) {
this.nodeResources = nodeResources;
this.availableIPs = availableIPs;
}
public boolean satisfies(AllocationResources other) {
if (!this.nodeResources.satisfies(other.nodeResources)) return false;
return this.availableIPs >= other.availableIPs;
}
public AllocationResources subtract(AllocationResources other) {
return new AllocationResources(this.nodeResources.subtract(other.nodeResources), this.availableIPs - other.availableIPs);
}
}
/**
* Keeps track of the reason why a host rejected an allocation.
*/
private static class AllocationFailureReason {
Node host;
        public AllocationFailureReason(Node host) {
this.host = host;
}
public boolean insufficientVcpu = false;
public boolean insufficientMemoryGb = false;
public boolean insufficientDiskGb = false;
public boolean incompatibleDiskSpeed = false;
public boolean insufficientAvailableIPs = false;
public boolean violatesParentHostPolicy = false;
public int numberOfReasons() {
int n = 0;
if (insufficientVcpu) n++;
if (insufficientMemoryGb) n++;
if (insufficientDiskGb) n++;
if (incompatibleDiskSpeed) n++;
if (insufficientAvailableIPs) n++;
if (violatesParentHostPolicy) n++;
return n;
}
@Override
public String toString() {
List<String> reasons = new ArrayList<>();
if (insufficientVcpu) reasons.add("insufficientVcpu");
if (insufficientMemoryGb) reasons.add("insufficientMemoryGb");
if (insufficientDiskGb) reasons.add("insufficientDiskGb");
if (incompatibleDiskSpeed) reasons.add("incompatibleDiskSpeed");
if (insufficientAvailableIPs) reasons.add("insufficientAvailableIPs");
if (violatesParentHostPolicy) reasons.add("violatesParentHostPolicy");
return String.format("[%s]", String.join(", ", reasons));
}
}
/**
* Provides convenient methods for tallying failures.
*/
public static class AllocationFailureReasonList {
private List<AllocationFailureReason> allocationFailureReasons;
public AllocationFailureReasonList(List<AllocationFailureReason> allocationFailureReasons) {
this.allocationFailureReasons = allocationFailureReasons;
}
public long insufficientVcpu() { return allocationFailureReasons.stream().filter(r -> r.insufficientVcpu).count(); }
public long insufficientMemoryGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientMemoryGb).count(); }
public long insufficientDiskGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientDiskGb).count(); }
public long incompatibleDiskSpeed() { return allocationFailureReasons.stream().filter(r -> r.incompatibleDiskSpeed).count(); }
public long insufficientAvailableIps() { return allocationFailureReasons.stream().filter(r -> r.insufficientAvailableIPs).count(); }
public long violatesParentHostPolicy() { return allocationFailureReasons.stream().filter(r -> r.violatesParentHostPolicy).count(); }
public AllocationFailureReasonList singularReasonFailures() {
return new AllocationFailureReasonList(allocationFailureReasons.stream()
.filter(reason -> reason.numberOfReasons() == 1).collect(Collectors.toList()));
}
public AllocationFailureReasonList multipleReasonFailures() {
return new AllocationFailureReasonList(allocationFailureReasons.stream()
.filter(reason -> reason.numberOfReasons() > 1).collect(Collectors.toList()));
}
public long size() {
return allocationFailureReasons.size();
}
@Override
public String toString() {
return String.format("CPU (%3d), Memory (%3d), Disk size (%3d), Disk speed (%3d), IP (%3d), Parent-Host Policy (%3d)",
insufficientVcpu(), insufficientMemoryGb(), insufficientDiskGb(),
incompatibleDiskSpeed(), insufficientAvailableIps(), violatesParentHostPolicy());
}
}
public static class AllocationHistory {
public static class Entry {
public Node tenant;
public Node newParent;
public long eligibleParents;
public Entry(Node tenant, Node newParent, long eligibleParents) {
this.tenant = tenant;
this.newParent = newParent;
this.eligibleParents = eligibleParents;
}
@Override
public String toString() {
return String.format("%-20s %-65s -> %15s [%3d valid]",
tenant.hostname().replaceFirst("\\..+", ""),
tenant.flavor().resources(),
newParent == null ? "x" : newParent.hostname().replaceFirst("\\..+", ""),
this.eligibleParents
);
}
}
public List<Entry> historyEntries;
public AllocationHistory() {
this.historyEntries = new ArrayList<>();
}
public void addEntry(Node tenant, Node newParent, long eligibleParents) {
this.historyEntries.add(new Entry(tenant, newParent, eligibleParents));
}
public Set<String> oldParents() {
Set<String> oldParents = new HashSet<>();
for (var entry : historyEntries)
entry.tenant.parentHostname().ifPresent(oldParents::add);
return oldParents;
}
@Override
public String toString() {
StringBuilder out = new StringBuilder();
String currentParent = "";
for (var entry : historyEntries) {
String parentName = entry.tenant.parentHostname().orElseThrow();
if (!parentName.equals(currentParent)) {
currentParent = parentName;
out.append(parentName).append("\n");
}
out.append(entry.toString()).append("\n");
}
return out.toString();
}
}
}
|
put the code block back into the `run` method and remove these helper functions
|
public void run(ConnectContext ctx, StmtExecutor executor) throws Exception {
if (ctx.isTxnModel()) {
throw new AnalysisException("insert into table command is not supported in txn model");
}
checkDatabaseAndTable(ctx);
getColumns();
getPartition();
ctx.getStatementContext().getInsertIntoContext().setTargetSchema(targetColumns);
LogicalPlanAdapter logicalPlanAdapter = new LogicalPlanAdapter(extractPlan(logicalQuery),
ctx.getStatementContext());
planner = new NereidsPlanner(ctx.getStatementContext());
planner.plan(logicalPlanAdapter, ctx.getSessionVariable().toThrift());
getTupleDesc();
addUnassignedColumns();
if (ctx.getMysqlChannel() != null) {
ctx.getMysqlChannel().reset();
}
String label = this.labelName;
if (label == null) {
label = String.format("label_%x_%x", ctx.queryId().hi, ctx.queryId().lo);
}
Transaction txn;
PlanFragment root = planner.getFragments().get(0);
DataSink sink = createDataSink(ctx, root);
Preconditions.checkArgument(sink instanceof OlapTableSink, "olap table sink is expected when"
+ " running insert into select");
txn = new Transaction(ctx, database, table, label, planner);
OlapTableSink olapTableSink = ((OlapTableSink) sink);
olapTableSink.init(ctx.queryId(), txn.getTxnId(), database.getId(), ctx.getExecTimeout(),
ctx.getSessionVariable().getSendBatchParallelism(), false);
olapTableSink.complete();
root.resetSink(olapTableSink);
if (isExplain()) {
executor.handleExplainStmt(((ExplainCommand) logicalQuery).getExplainString(planner));
return;
}
txn.executeInsertIntoSelectCommand(executor);
}
|
getPartition();
|
public void run(ConnectContext ctx, StmtExecutor executor) throws Exception {
if (ctx.isTxnModel()) {
throw new AnalysisException("insert into table command is not supported in txn model");
}
LogicalPlanAdapter logicalPlanAdapter = new LogicalPlanAdapter(logicalQuery, ctx.getStatementContext());
planner = new NereidsPlanner(ctx.getStatementContext());
planner.plan(logicalPlanAdapter, ctx.getSessionVariable().toThrift());
if (ctx.getMysqlChannel() != null) {
ctx.getMysqlChannel().reset();
}
String label = this.labelName;
if (label == null) {
label = String.format("label_%x_%x", ctx.queryId().hi, ctx.queryId().lo);
}
PhysicalOlapTableSink<?> physicalOlapTableSink = ((PhysicalOlapTableSink) planner.getPhysicalPlan());
OlapTableSink sink = ((OlapTableSink) planner.getFragments().get(0).getSink());
Preconditions.checkArgument(!isTxnBegin, "an insert command cannot create more than one txn");
Transaction txn = new Transaction(ctx,
physicalOlapTableSink.getDatabase(),
physicalOlapTableSink.getTargetTable(), label, planner);
isTxnBegin = true;
sink.init(ctx.queryId(), txn.getTxnId(),
physicalOlapTableSink.getDatabase().getId(),
ctx.getExecTimeout(),
ctx.getSessionVariable().getSendBatchParallelism(), false);
sink.complete();
TransactionState state = Env.getCurrentGlobalTransactionMgr().getTransactionState(
physicalOlapTableSink.getDatabase().getId(),
txn.getTxnId());
if (state == null) {
throw new DdlException("txn does not exist: " + txn.getTxnId());
}
state.addTableIndexes(physicalOlapTableSink.getTargetTable());
executor.setProfileType(ProfileType.LOAD);
LOG.info("Nereids start to execute the insert command, query id: {}, txn id: {}",
ctx.queryId(), txn.getTxnId());
txn.executeInsertIntoTableCommand(executor);
if (ctx.getState().getStateType() == MysqlStateType.ERR) {
try {
String errMsg = Strings.emptyToNull(ctx.getState().getErrorMessage());
Env.getCurrentGlobalTransactionMgr().abortTransaction(
physicalOlapTableSink.getDatabase().getId(), txn.getTxnId(),
(errMsg == null ? "unknown reason" : errMsg));
} catch (Exception abortTxnException) {
LOG.warn("errors when abort txn. {}", ctx.getQueryIdentifier(), abortTxnException);
}
}
}
|
class InsertIntoTableCommand extends Command implements ForwardWithSync {
public static final Logger LOG = LogManager.getLogger(InsertIntoTableCommand.class);
private final List<String> tableName;
private final List<String> colNames;
private final LogicalPlan logicalQuery;
private final String labelName;
private Database database;
private Table table;
private NereidsPlanner planner;
private TupleDescriptor olapTuple;
private List<String> partitions;
private List<String> hints;
private List<Column> targetColumns;
private List<Long> partitionIds = null;
/**
* constructor
*/
public InsertIntoTableCommand(List<String> tableName, String labelName, List<String> colNames,
List<String> partitions, List<String> hints, LogicalPlan logicalQuery) {
super(PlanType.INSERT_INTO_SELECT_COMMAND);
Preconditions.checkArgument(tableName != null, "tableName cannot be null in insert-into-select command");
Preconditions.checkArgument(logicalQuery != null, "logicalQuery cannot be null in insert-into-select command");
this.tableName = tableName;
this.labelName = labelName;
this.colNames = colNames;
this.partitions = partitions;
this.hints = hints;
this.logicalQuery = logicalQuery;
}
public NereidsPlanner getPlanner() {
return planner;
}
private void checkDatabaseAndTable(ConnectContext ctx) {
List<String> qualifier = RelationUtil.getQualifierName(ctx, tableName);
String catalogName = qualifier.get(0);
String dbName = qualifier.get(1);
String tableName = qualifier.get(2);
CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr().getCatalog(catalogName);
if (catalog == null) {
throw new RuntimeException(String.format("Catalog %s does not exist.", catalogName));
}
try {
database = ((Database) catalog.getDb(dbName).orElseThrow(() ->
new RuntimeException("Database [" + dbName + "] does not exist.")));
table = database.getTable(tableName).orElseThrow(() ->
new RuntimeException("Table [" + tableName + "] does not exist in database [" + dbName + "]."));
} catch (Throwable e) {
throw new AnalysisException(e.getMessage(), e.getCause());
}
}
private LogicalPlan extractPlan(LogicalPlan plan) {
if (plan instanceof ExplainCommand) {
return ((ExplainCommand) plan).getLogicalPlan();
}
return plan;
}
private DataSink createDataSink(ConnectContext ctx, PlanFragment root)
throws org.apache.doris.common.AnalysisException {
DataSink dataSink;
if (table instanceof OlapTable) {
dataSink = new OlapTableSink((OlapTable) table, olapTuple, partitionIds,
ctx.getSessionVariable().isEnableSingleReplicaInsert());
} else {
dataSink = DataSink.createDataSink(table);
}
return dataSink;
}
private void getColumns() {
if (colNames == null) {
this.targetColumns = table.getFullSchema();
} else {
this.targetColumns = Lists.newArrayList();
for (String colName : colNames) {
Column col = table.getColumn(colName);
if (col == null) {
throw new AnalysisException(String.format("Column: %s is not in table: %s",
colName, table.getName()));
}
this.targetColumns.add(col);
}
}
}
private void getTupleDesc() {
olapTuple = planner.getDescTable().createTupleDescriptor();
for (Column col : targetColumns) {
SlotDescriptor slotDesc = planner.getDescTable().addSlotDescriptor(olapTuple);
slotDesc.setIsMaterialized(true);
slotDesc.setType(col.getType());
slotDesc.setColumn(col);
slotDesc.setIsNullable(col.isAllowNull());
}
}
/**
* calculate PhysicalProperties.
*/
public PhysicalProperties calculatePhysicalProperties(List<Slot> outputs) {
List<ExprId> exprIds = outputs.subList(0, ((OlapTable) table).getKeysNum()).stream()
.map(NamedExpression::getExprId).collect(Collectors.toList());
return PhysicalProperties.createHash(new DistributionSpecHash(exprIds, ShuffleType.NATURAL));
}
private void addUnassignedColumns() throws org.apache.doris.common.AnalysisException {
PlanFragment root = planner.getFragments().get(0);
List<Expr> outputs = root.getOutputExprs();
if (outputs.size() == table.getFullSchema().size()) {
return;
}
int i = 0;
List<Expr> newOutputs = Lists.newArrayListWithCapacity(table.getFullSchema().size());
for (Column column : table.getFullSchema()) {
if (column.isVisible()) {
newOutputs.add(outputs.get(i++));
} else {
newOutputs.add(LiteralExpr.create("0", column.getType()));
}
}
root.setOutputExprs(newOutputs);
}
private void getPartition() {
if (partitions == null) {
return;
}
partitionIds = partitions.stream().map(pn -> {
Partition p = table.getPartition(pn);
if (p == null) {
throw new AnalysisException(String.format("Unknown partition: %s in table: %s", pn, table.getName()));
}
return p.getId();
}).collect(Collectors.toList());
}
public boolean isExplain() {
return logicalQuery instanceof ExplainCommand;
}
@Override
public <R, C> R accept(PlanVisitor<R, C> visitor, C context) {
return visitor.visitInsertIntoCommand(this, context);
}
}
|
class InsertIntoTableCommand extends Command implements ForwardWithSync {
public static final Logger LOG = LogManager.getLogger(InsertIntoTableCommand.class);
private final LogicalPlan logicalQuery;
private final String labelName;
private NereidsPlanner planner;
private boolean isTxnBegin = false;
/**
* constructor
*/
public InsertIntoTableCommand(LogicalPlan logicalQuery, String labelName) {
super(PlanType.INSERT_INTO_TABLE_COMMAND);
Preconditions.checkNotNull(logicalQuery, "logicalQuery cannot be null in InsertIntoTableCommand");
this.logicalQuery = logicalQuery;
this.labelName = labelName;
}
public NereidsPlanner getPlanner() {
return planner;
}
    @Override
public <R, C> R accept(PlanVisitor<R, C> visitor, C context) {
return visitor.visitInsertIntoCommand(this, context);
}
}
|
`concurrent_mutations` and `test_and_set_failed` metrics do not need to be included, as these failure modes cannot occur for read-only operations
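A minimal illustrative sketch of how that rule could be encoded, assuming a hypothetical addFailureMetrics helper (only the Metric type comes from the code below; the helper, its parameters, and the mutating flag are invented for illustration):

    // Hypothetical helper: register failure sub-metrics for one distributor operation.
    // test_and_set_failed and concurrent_mutations are only meaningful for mutating
    // operations (puts, removes, updates); read-only gets and visitors never produce them.
    private static void addFailureMetrics(Set<Metric> metrics, String op, boolean mutating) {
        metrics.add(new Metric("vds.distributor." + op + ".sum.failures.total.rate"));
        metrics.add(new Metric("vds.distributor." + op + ".sum.failures.notfound.rate"));
        if (mutating) {
            metrics.add(new Metric("vds.distributor." + op + ".sum.failures.test_and_set_failed.rate"));
            metrics.add(new Metric("vds.distributor." + op + ".sum.failures.concurrent_mutations.rate"));
        }
    }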
|
private static Set<Metric> getStorageMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("vds.datastored.alldisks.docs.average"));
metrics.add(new Metric("vds.datastored.alldisks.bytes.average"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.max"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.count"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.average"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.count"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_rpc_direct_dispatches.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_persistence_thread_polls.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allstripes.timeouts_waiting_for_throttle_token.rate"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average"));
metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate"));
metrics.add(new Metric("vds.idealstate.buckets_rechecking.average"));
metrics.add(new Metric("vds.idealstate.idealstate_diff.average"));
metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average"));
metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average"));
metrics.add(new Metric("vds.idealstate.buckets.average"));
metrics.add(new Metric("vds.idealstate.buckets_notrusted.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_moving_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_in.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_syncing.average"));
metrics.add(new Metric("vds.idealstate.max_observed_time_since_last_gc_sec.average"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.throttled.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_changed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.count"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.max"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.count"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.average"));
metrics.add(new Metric("vds.distributor.puts.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.max"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.count"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removes.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.max"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.count"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.average"));
metrics.add(new Metric("vds.distributor.updates.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.max"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.count"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.average"));
metrics.add(new Metric("vds.distributor.gets.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.max"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.count"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.average"));
metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.docsstored.average"));
metrics.add(new Metric("vds.distributor.bytesstored.average"));
metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count"));
metrics.add(new Metric("vds.mergethrottler.queuesize.max"));
metrics.add(new Metric("vds.mergethrottler.queuesize.sum"));
metrics.add(new Metric("vds.mergethrottler.queuesize.count"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.max"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.sum"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.count"));
metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate"));
metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.busy.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.total.rate"));
return metrics;
}
|
metrics.add(new Metric("vds.distributor.visitor.sum.failures.test_and_set_failed.rate"));
|
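    // Storage metrics (vds.*): data stored, visitor/filestor threads, ideal-state maintenance, distributor operations.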
private static Set<Metric> getStorageMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("vds.datastored.alldisks.docs.average"));
metrics.add(new Metric("vds.datastored.alldisks.bytes.average"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.max"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.count"));
metrics.add(new Metric("vds.filestor.alldisks.queuesize.average"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_window_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_waiting_threads.count"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.max"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.sum"));
metrics.add(new Metric("vds.filestor.alldisks.throttle_active_tokens.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_rpc_direct_dispatches.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allstripes.throttled_persistence_thread_polls.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allstripes.timeouts_waiting_for_throttle_token.rate"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count"));
metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average"));
metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count"));
metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate"));
metrics.add(new Metric("vds.idealstate.buckets_rechecking.average"));
metrics.add(new Metric("vds.idealstate.idealstate_diff.average"));
metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average"));
metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average"));
metrics.add(new Metric("vds.idealstate.buckets.average"));
metrics.add(new Metric("vds.idealstate.buckets_notrusted.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_moving_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_out.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_in.average"));
metrics.add(new Metric("vds.idealstate.bucket_replicas_syncing.average"));
metrics.add(new Metric("vds.idealstate.max_observed_time_since_last_gc_sec.average"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.merge_bucket.blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.throttled.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_changed.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate"));
metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.split_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.join_bucket.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate"));
metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.count"));
metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.max"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.count"));
metrics.add(new Metric("vds.distributor.puts.sum.latency.average"));
metrics.add(new Metric("vds.distributor.puts.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.puts.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.max"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.count"));
metrics.add(new Metric("vds.distributor.removes.sum.latency.average"));
metrics.add(new Metric("vds.distributor.removes.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.removes.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.max"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.count"));
metrics.add(new Metric("vds.distributor.updates.sum.latency.average"));
metrics.add(new Metric("vds.distributor.updates.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.failures.concurrent_mutations.rate"));
metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.max"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.count"));
metrics.add(new Metric("vds.distributor.gets.sum.latency.average"));
metrics.add(new Metric("vds.distributor.gets.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.max"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.count"));
metrics.add(new Metric("vds.distributor.visitor.sum.latency.average"));
metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notready.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notconnected.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.wrongdistributor.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.safe_time_not_reached.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.storagefailure.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.timeout.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.busy.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.inconsistent_bucket.rate"));
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate"));
metrics.add(new Metric("vds.distributor.docsstored.average"));
metrics.add(new Metric("vds.distributor.bytesstored.average"));
metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum"));
metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count"));
metrics.add(new Metric("vds.mergethrottler.queuesize.max"));
metrics.add(new Metric("vds.mergethrottler.queuesize.sum"));
metrics.add(new Metric("vds.mergethrottler.queuesize.count"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.max"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.sum"));
metrics.add(new Metric("vds.mergethrottler.active_window_size.count"));
metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate"));
metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.busy.rate"));
metrics.add(new Metric("vds.mergethrottler.mergechains.failures.total.rate"));
return metrics;
}
|
class VespaMetricSet {
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
getVespaMetrics(),
singleton(defaultVespaMetricSet));
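    // Collects all Vespa metric subsets into a single unmodifiable set.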
private static Set<Metric> getVespaMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.addAll(getSearchNodeMetrics());
metrics.addAll(getStorageMetrics());
metrics.addAll(getDocprocMetrics());
metrics.addAll(getClusterControllerMetrics());
metrics.addAll(getQrserverMetrics());
metrics.addAll(getContainerMetrics());
metrics.addAll(getConfigServerMetrics());
metrics.addAll(getSentinelMetrics());
metrics.addAll(getOtherMetrics());
return Collections.unmodifiableSet(metrics);
}
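    // Config sentinel metrics: restarts, uptime, and running services.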
private static Set<Metric> getSentinelMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("sentinel.restarts.count"));
metrics.add(new Metric("sentinel.totalRestarts.last"));
metrics.add(new Metric("sentinel.uptime.last"));
metrics.add(new Metric("sentinel.running.count"));
metrics.add(new Metric("sentinel.running.last"));
return metrics;
}
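    // Miscellaneous metrics: slobrok heartbeats, logd, and TLS/connection counters.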
private static Set<Metric> getOtherMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("slobrok.heartbeats.failed.count"));
metrics.add(new Metric("logd.processed.lines.count"));
metrics.add(new Metric("worker.connections.max"));
metrics.add(new Metric("endpoint.certificate.expiry.seconds"));
metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));
metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
metrics.add(new Metric("vds.server.network.tls-connections-broken"));
metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
metrics.add(new Metric("vds.server.fnet.num-connections"));
metrics.add(new Metric("node-certificate.expiry.seconds"));
return metrics;
}
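    // Config server metrics: request counts, latencies, cache sizes, and ZooKeeper state.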
private static Set<Metric> getConfigServerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("configserver.requests.count"));
metrics.add(new Metric("configserver.failedRequests.count"));
metrics.add(new Metric("configserver.latency.max"));
metrics.add(new Metric("configserver.latency.sum"));
metrics.add(new Metric("configserver.latency.count"));
metrics.add(new Metric("configserver.latency.average"));
metrics.add(new Metric("configserver.cacheConfigElems.last"));
metrics.add(new Metric("configserver.cacheChecksumElems.last"));
metrics.add(new Metric("configserver.hosts.last"));
metrics.add(new Metric("configserver.delayedResponses.count"));
metrics.add(new Metric("configserver.sessionChangeErrors.count"));
metrics.add(new Metric("configserver.zkZNodes.last"));
metrics.add(new Metric("configserver.zkAvgLatency.last"));
metrics.add(new Metric("configserver.zkMaxLatency.last"));
metrics.add(new Metric("configserver.zkConnections.last"));
metrics.add(new Metric("configserver.zkOutstandingRequests.last"));
return metrics;
}
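    // Container metrics: jdisc HTTP server, thread pools, heap/GC, and TLS handshake failures.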
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
metrics.add(new Metric("handled.requests.count"));
metrics.add(new Metric("handled.latency.max"));
metrics.add(new Metric("handled.latency.sum"));
metrics.add(new Metric("handled.latency.count"));
metrics.add(new Metric("handled.latency.average"));
metrics.add(new Metric("serverRejectedRequests.rate"));
metrics.add(new Metric("serverRejectedRequests.count"));
metrics.add(new Metric("serverThreadPoolSize.average"));
metrics.add(new Metric("serverThreadPoolSize.min"));
metrics.add(new Metric("serverThreadPoolSize.max"));
metrics.add(new Metric("serverThreadPoolSize.rate"));
metrics.add(new Metric("serverThreadPoolSize.count"));
metrics.add(new Metric("serverThreadPoolSize.last"));
metrics.add(new Metric("serverActiveThreads.average"));
metrics.add(new Metric("serverActiveThreads.min"));
metrics.add(new Metric("serverActiveThreads.max"));
metrics.add(new Metric("serverActiveThreads.rate"));
metrics.add(new Metric("serverActiveThreads.sum"));
metrics.add(new Metric("serverActiveThreads.count"));
metrics.add(new Metric("serverActiveThreads.last"));
metrics.add(new Metric("serverNumOpenConnections.average"));
metrics.add(new Metric("serverNumOpenConnections.max"));
metrics.add(new Metric("serverNumOpenConnections.last"));
metrics.add(new Metric("serverNumConnections.average"));
metrics.add(new Metric("serverNumConnections.max"));
metrics.add(new Metric("serverNumConnections.last"));
metrics.add(new Metric("serverBytesReceived.sum"));
metrics.add(new Metric("serverBytesReceived.count"));
metrics.add(new Metric("serverBytesSent.sum"));
metrics.add(new Metric("serverBytesSent.count"));
{
List<String> suffixes = List.of("sum", "count", "last", "min", "max");
addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
}
metrics.add(new Metric("httpapi_latency.max"));
metrics.add(new Metric("httpapi_latency.sum"));
metrics.add(new Metric("httpapi_latency.count"));
metrics.add(new Metric("httpapi_latency.average"));
metrics.add(new Metric("httpapi_pending.max"));
metrics.add(new Metric("httpapi_pending.sum"));
metrics.add(new Metric("httpapi_pending.count"));
metrics.add(new Metric("httpapi_pending.average"));
metrics.add(new Metric("httpapi_num_operations.rate"));
metrics.add(new Metric("httpapi_num_updates.rate"));
metrics.add(new Metric("httpapi_num_removes.rate"));
metrics.add(new Metric("httpapi_num_puts.rate"));
metrics.add(new Metric("httpapi_succeeded.rate"));
metrics.add(new Metric("httpapi_failed.rate"));
metrics.add(new Metric("httpapi_parse_error.rate"));
addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));
addMetric(metrics, "httpapi_not_found", List.of("rate"));
metrics.add(new Metric("mem.heap.total.average"));
metrics.add(new Metric("mem.heap.free.average"));
metrics.add(new Metric("mem.heap.used.average"));
metrics.add(new Metric("mem.heap.used.max"));
metrics.add(new Metric("jdisc.memory_mappings.max"));
metrics.add(new Metric("jdisc.open_file_descriptors.max"));
metrics.add(new Metric("jdisc.gc.count.average"));
metrics.add(new Metric("jdisc.gc.count.max"));
metrics.add(new Metric("jdisc.gc.count.last"));
metrics.add(new Metric("jdisc.gc.ms.average"));
metrics.add(new Metric("jdisc.gc.ms.max"));
metrics.add(new Metric("jdisc.gc.ms.last"));
metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));
metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
metrics.add(new Metric("container-iam-role.expiry.seconds"));
metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
addMetric(metrics, "jdisc.http.request.requests_per_connection", List.of("sum", "count", "min", "max", "average"));
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
metrics.add(new Metric("http.status.3xx.rate"));
metrics.add(new Metric("http.status.4xx.rate"));
metrics.add(new Metric("http.status.5xx.rate"));
metrics.add(new Metric("http.status.401.rate"));
metrics.add(new Metric("http.status.403.rate"));
metrics.add(new Metric("jdisc.http.request.uri_length.max"));
metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
metrics.add(new Metric("jdisc.http.request.uri_length.count"));
metrics.add(new Metric("jdisc.http.request.uri_length.average"));
metrics.add(new Metric("jdisc.http.request.content_size.max"));
metrics.add(new Metric("jdisc.http.request.content_size.sum"));
metrics.add(new Metric("jdisc.http.request.content_size.count"));
metrics.add(new Metric("jdisc.http.request.content_size.average"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));
metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", List.of("last"));
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", List.of("last"));
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", List.of("sum", "count", "min", "max"));
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", List.of("sum", "count", "min", "max"));
addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", List.of("sum", "count", "min", "max"));
addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));
return metrics;
}
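    // Cluster controller metrics: node state counts, tick/work times, and resource usage.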
private static Set<Metric> getClusterControllerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("cluster-controller.down.count.last"));
metrics.add(new Metric("cluster-controller.initializing.count.last"));
metrics.add(new Metric("cluster-controller.maintenance.count.last"));
metrics.add(new Metric("cluster-controller.retired.count.last"));
metrics.add(new Metric("cluster-controller.stopping.count.last"));
metrics.add(new Metric("cluster-controller.up.count.last"));
metrics.add(new Metric("cluster-controller.cluster-state-change.count"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.last"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.max"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.sum"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.last"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.max"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.sum"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.work-ms.last"));
metrics.add(new Metric("cluster-controller.work-ms.sum"));
metrics.add(new Metric("cluster-controller.work-ms.count"));
metrics.add(new Metric("cluster-controller.is-master.last"));
metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
metrics.add(new Metric("cluster-controller.node-event.count"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.last"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.last"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));
metrics.add(new Metric("reindexing.progress.last"));
return metrics;
}
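    // Document processor metrics.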
private static Set<Metric> getDocprocMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("documents_processed.rate"));
return metrics;
}
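    // Query server metrics: connections, query/feed latencies, hits, relevance, and error rates.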
private static Set<Metric> getQrserverMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("peak_qps.max"));
metrics.add(new Metric("search_connections.max"));
metrics.add(new Metric("search_connections.sum"));
metrics.add(new Metric("search_connections.count"));
metrics.add(new Metric("search_connections.average"));
metrics.add(new Metric("feed.latency.max"));
metrics.add(new Metric("feed.latency.sum"));
metrics.add(new Metric("feed.latency.count"));
metrics.add(new Metric("feed.latency.average"));
metrics.add(new Metric("feed.http-requests.count"));
metrics.add(new Metric("feed.http-requests.rate"));
metrics.add(new Metric("queries.rate"));
metrics.add(new Metric("query_container_latency.max"));
metrics.add(new Metric("query_container_latency.sum"));
metrics.add(new Metric("query_container_latency.count"));
metrics.add(new Metric("query_container_latency.average"));
metrics.add(new Metric("query_latency.max"));
metrics.add(new Metric("query_latency.sum"));
metrics.add(new Metric("query_latency.count"));
metrics.add(new Metric("query_latency.average"));
metrics.add(new Metric("query_latency.95percentile"));
metrics.add(new Metric("query_latency.99percentile"));
metrics.add(new Metric("failed_queries.rate"));
metrics.add(new Metric("degraded_queries.rate"));
metrics.add(new Metric("hits_per_query.max"));
metrics.add(new Metric("hits_per_query.sum"));
metrics.add(new Metric("hits_per_query.count"));
metrics.add(new Metric("hits_per_query.average"));
metrics.add(new Metric("hits_per_query.95percentile"));
metrics.add(new Metric("hits_per_query.99percentile"));
metrics.add(new Metric("query_hit_offset.max"));
metrics.add(new Metric("query_hit_offset.sum"));
metrics.add(new Metric("query_hit_offset.count"));
metrics.add(new Metric("documents_covered.count"));
metrics.add(new Metric("documents_total.count"));
metrics.add(new Metric("dispatch_internal.rate"));
metrics.add(new Metric("dispatch_fdispatch.rate"));
addMetric(metrics, "jdisc.render.latency", Set.of("min", "max", "count", "sum", "last", "average"));
addMetric(metrics, "query_item_count", Set.of("max", "sum", "count"));
metrics.add(new Metric("totalhits_per_query.max"));
metrics.add(new Metric("totalhits_per_query.sum"));
metrics.add(new Metric("totalhits_per_query.count"));
metrics.add(new Metric("totalhits_per_query.average"));
metrics.add(new Metric("totalhits_per_query.95percentile"));
metrics.add(new Metric("totalhits_per_query.99percentile"));
metrics.add(new Metric("empty_results.rate"));
metrics.add(new Metric("requestsOverQuota.rate"));
metrics.add(new Metric("requestsOverQuota.count"));
metrics.add(new Metric("relevance.at_1.sum"));
metrics.add(new Metric("relevance.at_1.count"));
metrics.add(new Metric("relevance.at_1.average"));
metrics.add(new Metric("relevance.at_3.sum"));
metrics.add(new Metric("relevance.at_3.count"));
metrics.add(new Metric("relevance.at_3.average"));
metrics.add(new Metric("relevance.at_10.sum"));
metrics.add(new Metric("relevance.at_10.count"));
metrics.add(new Metric("relevance.at_10.average"));
metrics.add(new Metric("error.timeout.rate"));
metrics.add(new Metric("error.backends_oos.rate"));
metrics.add(new Metric("error.plugin_failure.rate"));
metrics.add(new Metric("error.backend_communication_error.rate"));
metrics.add(new Metric("error.empty_document_summaries.rate"));
metrics.add(new Metric("error.invalid_query_parameter.rate"));
metrics.add(new Metric("error.internal_server_error.rate"));
metrics.add(new Metric("error.misconfigured_server.rate"));
metrics.add(new Metric("error.invalid_query_transformation.rate"));
metrics.add(new Metric("error.result_with_errors.rate"));
metrics.add(new Metric("error.unspecified.rate"));
metrics.add(new Metric("error.unhandled_exception.rate"));
return metrics;
}
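    // Helper: adds the common executor metrics (queue size, pending, accepted, wakeups, utilization) for the given prefix.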
private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
metrics.add(new Metric(prefix + ".queuesize.max"));
metrics.add(new Metric(prefix + ".queuesize.sum"));
metrics.add(new Metric(prefix + ".queuesize.count"));
metrics.add(new Metric(prefix + ".maxpending.last"));
metrics.add(new Metric(prefix + ".accepted.rate"));
metrics.add(new Metric(prefix + ".wakeups.rate"));
metrics.add(new Metric(prefix + ".utilization.max"));
metrics.add(new Metric(prefix + ".utilization.sum"));
metrics.add(new Metric(prefix + ".utilization.count"));
}
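    // Search node (proton) metrics: document DB state, executors, resource usage, matching, and feeding.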
private static Set<Metric> getSearchNodeMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
metrics.add(new Metric("content.proton.transport.query.count.rate"));
metrics.add(new Metric("content.proton.docsum.docs.rate"));
metrics.add(new Metric("content.proton.docsum.latency.max"));
metrics.add(new Metric("content.proton.docsum.latency.sum"));
metrics.add(new Metric("content.proton.docsum.latency.count"));
metrics.add(new Metric("content.proton.docsum.latency.average"));
metrics.add(new Metric("content.proton.transport.query.latency.max"));
metrics.add(new Metric("content.proton.transport.query.latency.sum"));
metrics.add(new Metric("content.proton.transport.query.latency.count"));
metrics.add(new Metric("content.proton.transport.query.latency.average"));
metrics.add(new Metric("content.proton.search_protocol.query.latency.max"));
metrics.add(new Metric("content.proton.search_protocol.query.latency.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.latency.count"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.max"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.count"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");
metrics.add(new Metric("content.proton.documentdb.job.total.average"));
metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));
metrics.add(new Metric("content.proton.resource_usage.disk.average"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.total.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.total_utilization.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.transient.max"));
metrics.add(new Metric("content.proton.resource_usage.memory.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.total.max"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.total_utilization.max"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.transient.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.count"));
metrics.add(new Metric("content.proton.transactionlog.entries.average"));
metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.count"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count"));
return metrics;
}
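// Appends one Metric per aggregate suffix; e.g. addMetric(metrics, "foo.latency", List.of("max", "sum"))
// adds "foo.latency.max" and "foo.latency.sum". ("foo.latency" is an illustrative name, not a real metric.)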
private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffixes) {
for (String suffix : aggregateSuffixes) {
metrics.add(new Metric(metricName + "." + suffix));
}
}
}
|
class VespaMetricSet {
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
getVespaMetrics(),
singleton(defaultVespaMetricSet));
private static Set<Metric> getVespaMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.addAll(getSearchNodeMetrics());
metrics.addAll(getStorageMetrics());
metrics.addAll(getDocprocMetrics());
metrics.addAll(getClusterControllerMetrics());
metrics.addAll(getQrserverMetrics());
metrics.addAll(getContainerMetrics());
metrics.addAll(getConfigServerMetrics());
metrics.addAll(getSentinelMetrics());
metrics.addAll(getOtherMetrics());
return Collections.unmodifiableSet(metrics);
}
private static Set<Metric> getSentinelMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("sentinel.restarts.count"));
metrics.add(new Metric("sentinel.totalRestarts.last"));
metrics.add(new Metric("sentinel.uptime.last"));
metrics.add(new Metric("sentinel.running.count"));
metrics.add(new Metric("sentinel.running.last"));
return metrics;
}
private static Set<Metric> getOtherMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("slobrok.heartbeats.failed.count"));
metrics.add(new Metric("logd.processed.lines.count"));
metrics.add(new Metric("worker.connections.max"));
metrics.add(new Metric("endpoint.certificate.expiry.seconds"));
metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));
metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
metrics.add(new Metric("vds.server.network.tls-connections-broken"));
metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
metrics.add(new Metric("vds.server.fnet.num-connections"));
metrics.add(new Metric("node-certificate.expiry.seconds"));
return metrics;
}
private static Set<Metric> getConfigServerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("configserver.requests.count"));
metrics.add(new Metric("configserver.failedRequests.count"));
metrics.add(new Metric("configserver.latency.max"));
metrics.add(new Metric("configserver.latency.sum"));
metrics.add(new Metric("configserver.latency.count"));
metrics.add(new Metric("configserver.latency.average"));
metrics.add(new Metric("configserver.cacheConfigElems.last"));
metrics.add(new Metric("configserver.cacheChecksumElems.last"));
metrics.add(new Metric("configserver.hosts.last"));
metrics.add(new Metric("configserver.delayedResponses.count"));
metrics.add(new Metric("configserver.sessionChangeErrors.count"));
metrics.add(new Metric("configserver.zkZNodes.last"));
metrics.add(new Metric("configserver.zkAvgLatency.last"));
metrics.add(new Metric("configserver.zkMaxLatency.last"));
metrics.add(new Metric("configserver.zkConnections.last"));
metrics.add(new Metric("configserver.zkOutstandingRequests.last"));
return metrics;
}
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
metrics.add(new Metric("handled.requests.count"));
metrics.add(new Metric("handled.latency.max"));
metrics.add(new Metric("handled.latency.sum"));
metrics.add(new Metric("handled.latency.count"));
metrics.add(new Metric("handled.latency.average"));
metrics.add(new Metric("serverRejectedRequests.rate"));
metrics.add(new Metric("serverRejectedRequests.count"));
metrics.add(new Metric("serverThreadPoolSize.average"));
metrics.add(new Metric("serverThreadPoolSize.min"));
metrics.add(new Metric("serverThreadPoolSize.max"));
metrics.add(new Metric("serverThreadPoolSize.rate"));
metrics.add(new Metric("serverThreadPoolSize.count"));
metrics.add(new Metric("serverThreadPoolSize.last"));
metrics.add(new Metric("serverActiveThreads.average"));
metrics.add(new Metric("serverActiveThreads.min"));
metrics.add(new Metric("serverActiveThreads.max"));
metrics.add(new Metric("serverActiveThreads.rate"));
metrics.add(new Metric("serverActiveThreads.sum"));
metrics.add(new Metric("serverActiveThreads.count"));
metrics.add(new Metric("serverActiveThreads.last"));
metrics.add(new Metric("serverNumOpenConnections.average"));
metrics.add(new Metric("serverNumOpenConnections.max"));
metrics.add(new Metric("serverNumOpenConnections.last"));
metrics.add(new Metric("serverNumConnections.average"));
metrics.add(new Metric("serverNumConnections.max"));
metrics.add(new Metric("serverNumConnections.last"));
metrics.add(new Metric("serverBytesReceived.sum"));
metrics.add(new Metric("serverBytesReceived.count"));
metrics.add(new Metric("serverBytesSent.sum"));
metrics.add(new Metric("serverBytesSent.count"));
{
List<String> suffixes = List.of("sum", "count", "last", "min", "max");
addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
}
metrics.add(new Metric("httpapi_latency.max"));
metrics.add(new Metric("httpapi_latency.sum"));
metrics.add(new Metric("httpapi_latency.count"));
metrics.add(new Metric("httpapi_latency.average"));
metrics.add(new Metric("httpapi_pending.max"));
metrics.add(new Metric("httpapi_pending.sum"));
metrics.add(new Metric("httpapi_pending.count"));
metrics.add(new Metric("httpapi_pending.average"));
metrics.add(new Metric("httpapi_num_operations.rate"));
metrics.add(new Metric("httpapi_num_updates.rate"));
metrics.add(new Metric("httpapi_num_removes.rate"));
metrics.add(new Metric("httpapi_num_puts.rate"));
metrics.add(new Metric("httpapi_succeeded.rate"));
metrics.add(new Metric("httpapi_failed.rate"));
metrics.add(new Metric("httpapi_parse_error.rate"));
addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));
addMetric(metrics, "httpapi_not_found", List.of("rate"));
metrics.add(new Metric("mem.heap.total.average"));
metrics.add(new Metric("mem.heap.free.average"));
metrics.add(new Metric("mem.heap.used.average"));
metrics.add(new Metric("mem.heap.used.max"));
metrics.add(new Metric("jdisc.memory_mappings.max"));
metrics.add(new Metric("jdisc.open_file_descriptors.max"));
metrics.add(new Metric("jdisc.gc.count.average"));
metrics.add(new Metric("jdisc.gc.count.max"));
metrics.add(new Metric("jdisc.gc.count.last"));
metrics.add(new Metric("jdisc.gc.ms.average"));
metrics.add(new Metric("jdisc.gc.ms.max"));
metrics.add(new Metric("jdisc.gc.ms.last"));
metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));
metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
metrics.add(new Metric("container-iam-role.expiry.seconds"));
metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
addMetric(metrics, "jdisc.http.request.requests_per_connection", List.of("sum", "count", "min", "max", "average"));
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
metrics.add(new Metric("http.status.3xx.rate"));
metrics.add(new Metric("http.status.4xx.rate"));
metrics.add(new Metric("http.status.5xx.rate"));
metrics.add(new Metric("http.status.401.rate"));
metrics.add(new Metric("http.status.403.rate"));
metrics.add(new Metric("jdisc.http.request.uri_length.max"));
metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
metrics.add(new Metric("jdisc.http.request.uri_length.count"));
metrics.add(new Metric("jdisc.http.request.uri_length.average"));
metrics.add(new Metric("jdisc.http.request.content_size.max"));
metrics.add(new Metric("jdisc.http.request.content_size.sum"));
metrics.add(new Metric("jdisc.http.request.content_size.count"));
metrics.add(new Metric("jdisc.http.request.content_size.average"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));
metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", List.of("last"));
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", List.of("last"));
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", List.of("sum", "count", "min", "max"));
addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", List.of("sum", "count", "min", "max"));
addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", List.of("sum", "count", "min", "max"));
addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));
addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));
return metrics;
}
private static Set<Metric> getClusterControllerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("cluster-controller.down.count.last"));
metrics.add(new Metric("cluster-controller.initializing.count.last"));
metrics.add(new Metric("cluster-controller.maintenance.count.last"));
metrics.add(new Metric("cluster-controller.retired.count.last"));
metrics.add(new Metric("cluster-controller.stopping.count.last"));
metrics.add(new Metric("cluster-controller.up.count.last"));
metrics.add(new Metric("cluster-controller.cluster-state-change.count"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.last"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.max"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.sum"));
metrics.add(new Metric("cluster-controller.busy-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.last"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.max"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.sum"));
metrics.add(new Metric("cluster-controller.idle-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.work-ms.last"));
metrics.add(new Metric("cluster-controller.work-ms.sum"));
metrics.add(new Metric("cluster-controller.work-ms.count"));
metrics.add(new Metric("cluster-controller.is-master.last"));
metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
metrics.add(new Metric("cluster-controller.node-event.count"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.last"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.last"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));
metrics.add(new Metric("reindexing.progress.last"));
return metrics;
}
private static Set<Metric> getDocprocMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("documents_processed.rate"));
return metrics;
}
private static Set<Metric> getQrserverMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("peak_qps.max"));
metrics.add(new Metric("search_connections.max"));
metrics.add(new Metric("search_connections.sum"));
metrics.add(new Metric("search_connections.count"));
metrics.add(new Metric("search_connections.average"));
metrics.add(new Metric("feed.latency.max"));
metrics.add(new Metric("feed.latency.sum"));
metrics.add(new Metric("feed.latency.count"));
metrics.add(new Metric("feed.latency.average"));
metrics.add(new Metric("feed.http-requests.count"));
metrics.add(new Metric("feed.http-requests.rate"));
metrics.add(new Metric("queries.rate"));
metrics.add(new Metric("query_container_latency.max"));
metrics.add(new Metric("query_container_latency.sum"));
metrics.add(new Metric("query_container_latency.count"));
metrics.add(new Metric("query_container_latency.average"));
metrics.add(new Metric("query_latency.max"));
metrics.add(new Metric("query_latency.sum"));
metrics.add(new Metric("query_latency.count"));
metrics.add(new Metric("query_latency.average"));
metrics.add(new Metric("query_latency.95percentile"));
metrics.add(new Metric("query_latency.99percentile"));
metrics.add(new Metric("failed_queries.rate"));
metrics.add(new Metric("degraded_queries.rate"));
metrics.add(new Metric("hits_per_query.max"));
metrics.add(new Metric("hits_per_query.sum"));
metrics.add(new Metric("hits_per_query.count"));
metrics.add(new Metric("hits_per_query.average"));
metrics.add(new Metric("hits_per_query.95percentile"));
metrics.add(new Metric("hits_per_query.99percentile"));
metrics.add(new Metric("query_hit_offset.max"));
metrics.add(new Metric("query_hit_offset.sum"));
metrics.add(new Metric("query_hit_offset.count"));
metrics.add(new Metric("documents_covered.count"));
metrics.add(new Metric("documents_total.count"));
metrics.add(new Metric("dispatch_internal.rate"));
metrics.add(new Metric("dispatch_fdispatch.rate"));
addMetric(metrics, "jdisc.render.latency", Set.of("min", "max", "count", "sum", "last", "average"));
addMetric(metrics, "query_item_count", Set.of("max", "sum", "count"));
metrics.add(new Metric("totalhits_per_query.max"));
metrics.add(new Metric("totalhits_per_query.sum"));
metrics.add(new Metric("totalhits_per_query.count"));
metrics.add(new Metric("totalhits_per_query.average"));
metrics.add(new Metric("totalhits_per_query.95percentile"));
metrics.add(new Metric("totalhits_per_query.99percentile"));
metrics.add(new Metric("empty_results.rate"));
metrics.add(new Metric("requestsOverQuota.rate"));
metrics.add(new Metric("requestsOverQuota.count"));
metrics.add(new Metric("relevance.at_1.sum"));
metrics.add(new Metric("relevance.at_1.count"));
metrics.add(new Metric("relevance.at_1.average"));
metrics.add(new Metric("relevance.at_3.sum"));
metrics.add(new Metric("relevance.at_3.count"));
metrics.add(new Metric("relevance.at_3.average"));
metrics.add(new Metric("relevance.at_10.sum"));
metrics.add(new Metric("relevance.at_10.count"));
metrics.add(new Metric("relevance.at_10.average"));
metrics.add(new Metric("error.timeout.rate"));
metrics.add(new Metric("error.backends_oos.rate"));
metrics.add(new Metric("error.plugin_failure.rate"));
metrics.add(new Metric("error.backend_communication_error.rate"));
metrics.add(new Metric("error.empty_document_summaries.rate"));
metrics.add(new Metric("error.invalid_query_parameter.rate"));
metrics.add(new Metric("error.internal_server_error.rate"));
metrics.add(new Metric("error.misconfigured_server.rate"));
metrics.add(new Metric("error.invalid_query_transformation.rate"));
metrics.add(new Metric("error.result_with_errors.rate"));
metrics.add(new Metric("error.unspecified.rate"));
metrics.add(new Metric("error.unhandled_exception.rate"));
return metrics;
}
private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
metrics.add(new Metric(prefix + ".queuesize.max"));
metrics.add(new Metric(prefix + ".queuesize.sum"));
metrics.add(new Metric(prefix + ".queuesize.count"));
metrics.add(new Metric(prefix + ".maxpending.last"));
metrics.add(new Metric(prefix + ".accepted.rate"));
metrics.add(new Metric(prefix + ".wakeups.rate"));
metrics.add(new Metric(prefix + ".utilization.max"));
metrics.add(new Metric(prefix + ".utilization.sum"));
metrics.add(new Metric(prefix + ".utilization.count"));
}
private static Set<Metric> getSearchNodeMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
metrics.add(new Metric("content.proton.transport.query.count.rate"));
metrics.add(new Metric("content.proton.docsum.docs.rate"));
metrics.add(new Metric("content.proton.docsum.latency.max"));
metrics.add(new Metric("content.proton.docsum.latency.sum"));
metrics.add(new Metric("content.proton.docsum.latency.count"));
metrics.add(new Metric("content.proton.docsum.latency.average"));
metrics.add(new Metric("content.proton.transport.query.latency.max"));
metrics.add(new Metric("content.proton.transport.query.latency.sum"));
metrics.add(new Metric("content.proton.transport.query.latency.count"));
metrics.add(new Metric("content.proton.transport.query.latency.average"));
metrics.add(new Metric("content.proton.search_protocol.query.latency.max"));
metrics.add(new Metric("content.proton.search_protocol.query.latency.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.latency.count"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.max"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.request_size.count"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum"));
metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count"));
metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");
metrics.add(new Metric("content.proton.documentdb.job.total.average"));
metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.highest_used_lid.last"));
metrics.add(new Metric("content.proton.documentdb.ready.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.notready.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.removed.lid_space.used_lids.last"));
metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));
metrics.add(new Metric("content.proton.resource_usage.disk.average"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.total.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.total_utilization.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_usage.transient.max"));
metrics.add(new Metric("content.proton.resource_usage.memory.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.total.max"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.total_utilization.max"));
metrics.add(new Metric("content.proton.resource_usage.memory_usage.transient.max"));
metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.setup.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.read.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.write.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.compact.count"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.max"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.sum"));
metrics.add(new Metric("content.proton.resource_usage.cpu_util.other.count"));
metrics.add(new Metric("content.proton.transactionlog.entries.average"));
metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate"));
metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate"));
metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));
metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.count"));
metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.grouping_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum"));
metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count"));
return metrics;
}
private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffixes) {
for (String suffix : aggregateSuffixes) {
metrics.add(new Metric(metricName + "." + suffix));
}
}
}
|
The grammar seems to allow annotations, but are we handling them? https://github.com/ballerina-platform/ballerina-lang/pull/21209/files#diff-0e72b59e6b830c29e9c6161f3bb4177aR844

The following should fail, since an annotation that is allowed only on `source const` is being used with a let variable, but it compiles without an error:

```ballerina
int globalVar = 5;

const annotation v1 on source const;

public function main() {
    int b = let @v1 int x = 4 in 2 * x * globalVar;
}
```
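For context, a minimal sketch of the kind of attach-point check this would need, written as standalone Java; the type names, the "var" attach-point string, and the validate helper are all illustrative assumptions, not the Ballerina compiler's actual API:

```java
import java.util.List;
import java.util.Set;

// Hypothetical sketch: rejects annotations whose declared attach points
// do not permit attachment to a (let) variable.
class LetVarAnnotationCheck {

    // Stand-in for an annotation declaration and its declared attach points.
    record AnnotationDecl(String name, Set<String> attachPoints) { }

    // Returns one diagnostic per annotation that may not attach to a let variable.
    static List<String> validate(List<AnnotationDecl> attachments) {
        return attachments.stream()
                .filter(a -> !a.attachPoints().contains("var"))
                .map(a -> "annotation '" + a.name() + "' is not allowed on a let variable")
                .toList();
    }

    public static void main(String[] args) {
        // Mirrors the snippet above: @v1 is declared "on source const" only.
        AnnotationDecl v1 = new AnnotationDecl("v1", Set.of("source const"));
        System.out.println(validate(List.of(v1)));
        // prints: [annotation 'v1' is not allowed on a let variable]
    }
}
```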
|
public void exitServiceConstructorExpression(BallerinaParser.ServiceConstructorExpressionContext ctx) {
if (isInErrorState) {
return;
}
final DiagnosticPos serviceDefPos = getCurrentPos(ctx);
final String serviceVarName = null;
final DiagnosticPos varPos = serviceDefPos;
this.pkgBuilder.endServiceDef(serviceDefPos, getWS(ctx), serviceVarName, varPos, true,
ctx.serviceConstructorExpr().annotationAttachment().size());
}
|
}
|
public void exitServiceConstructorExpression(BallerinaParser.ServiceConstructorExpressionContext ctx) {
if (isInErrorState) {
return;
}
final DiagnosticPos serviceDefPos = getCurrentPos(ctx);
final String serviceVarName = null;
final DiagnosticPos varPos = serviceDefPos;
this.pkgBuilder.endServiceDef(serviceDefPos, getWS(ctx), serviceVarName, varPos, true,
ctx.serviceConstructorExpr().annotationAttachment().size());
}
|
class BLangParserListener extends BallerinaParserBaseListener {
private static final String KEYWORD_PUBLIC = "public";
private static final String KEYWORD_KEY = "key";
private BLangPackageBuilder pkgBuilder;
private BDiagnosticSource diagnosticSrc;
private BLangDiagnosticLog dlog;
private List<String> pkgNameComps;
private String pkgVersion;
private boolean isInErrorState = false;
private Pattern pattern = Pattern.compile(Constants.UNICODE_REGEX);
BLangParserListener(CompilerContext context, CompilationUnitNode compUnit, BDiagnosticSource diagnosticSource) {
this.pkgBuilder = new BLangPackageBuilder(context, compUnit);
this.diagnosticSrc = diagnosticSource;
this.dlog = BLangDiagnosticLog.getInstance(context);
}
@Override
public void enterParameterList(BallerinaParser.ParameterListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startVarList();
}
@Override
public void exitParameter(BallerinaParser.ParameterContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addSimpleVar(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), false,
ctx.annotationAttachment().size(), ctx.PUBLIC() != null);
}
/**
* {@inheritDoc}
*/
@Override
public void enterFormalParameterList(BallerinaParser.FormalParameterListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startVarList();
}
/**
* {@inheritDoc}
*/
@Override
public void exitFormalParameterList(BallerinaParser.FormalParameterListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endFormalParameterList(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitDefaultableParameter(BallerinaParser.DefaultableParameterContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addDefaultableParam(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitRestParameter(BallerinaParser.RestParameterContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRestParam(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), ctx.annotationAttachment().size());
}
@Override
public void exitRestParameterTypeName(BallerinaParser.RestParameterTypeNameContext ctx) {
if (isInErrorState) {
return;
}
pkgBuilder.addRestParam(getCurrentPos(ctx), getWS(ctx), null, null, 0);
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterTypeName(BallerinaParser.ParameterTypeNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addSimpleVar(getCurrentPos(ctx), getWS(ctx), null, null, false, 0);
}
@Override
public void enterCompilationUnit(BallerinaParser.CompilationUnitContext ctx) {
}
/**
* {@inheritDoc}
*/
@Override
public void exitCompilationUnit(BallerinaParser.CompilationUnitContext ctx) {
this.pkgBuilder.endCompilationUnit(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitPackageName(BallerinaParser.PackageNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgNameComps = new ArrayList<>();
ctx.Identifier().forEach(e -> pkgNameComps.add(e.getText()));
this.pkgVersion = ctx.version() != null ? ctx.version().versionPattern().getText() : null;
}
/**
* {@inheritDoc}
*/
@Override
public void exitImportDeclaration(BallerinaParser.ImportDeclarationContext ctx) {
if (isInErrorState) {
return;
}
String alias = ctx.Identifier() != null ? ctx.Identifier().getText() : null;
BallerinaParser.OrgNameContext orgNameContext = ctx.orgName();
if (orgNameContext == null) {
this.pkgBuilder.addImportPackageDeclaration(getCurrentPos(ctx), getWS(ctx),
null, this.pkgNameComps, this.pkgVersion, alias);
} else {
this.pkgBuilder.addImportPackageDeclaration(getCurrentPos(ctx), getWS(ctx),
orgNameContext.getText(), this.pkgNameComps, this.pkgVersion, alias);
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitServiceDefinition(BallerinaParser.ServiceDefinitionContext ctx) {
if (isInErrorState) {
return;
}
final DiagnosticPos serviceDefPos = getCurrentPos(ctx);
final String serviceVarName = ctx.Identifier() != null ? ctx.Identifier().getText() : null;
final DiagnosticPos varPos =
ctx.Identifier() != null ? getCurrentPos(ctx.Identifier()) : serviceDefPos;
this.pkgBuilder.endServiceDef(serviceDefPos, getWS(ctx), serviceVarName, varPos, false);
}
/**
* {@inheritDoc}
*/
@Override
public void enterServiceBody(BallerinaParser.ServiceBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startServiceDef(getCurrentPos(ctx));
this.pkgBuilder.startObjectType();
}
/**
* {@inheritDoc}
*/
@Override
public void exitServiceBody(BallerinaParser.ServiceBodyContext ctx) {
if (isInErrorState) {
return;
}
boolean isFieldAnalyseRequired = (ctx.parent.parent instanceof BallerinaParser.GlobalVariableDefinitionContext
|| ctx.parent.parent instanceof BallerinaParser.ReturnParameterContext)
|| ctx.parent.parent.parent.parent instanceof BallerinaParser.TypeDefinitionContext;
this.pkgBuilder
.addObjectType(getCurrentPos(ctx), getWS(ctx), isFieldAnalyseRequired, false, false, false, true);
}
/**
* {@inheritDoc}
*/
@Override
public void enterBlockFunctionBody(BallerinaParser.BlockFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startBlockFunctionBody();
}
/**
* {@inheritDoc}
*/
@Override
public void exitBlockFunctionBody(BallerinaParser.BlockFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endBlockFunctionBody(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterExprFunctionBody(BallerinaParser.ExprFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExprFunctionBody();
}
/**
* {@inheritDoc}
*/
@Override
public void exitExprFunctionBody(BallerinaParser.ExprFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExprFunctionBody(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterExternalFunctionBody(BallerinaParser.ExternalFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExternFunctionBody();
}
/**
* {@inheritDoc}
*/
@Override
public void exitExternalFunctionBody(BallerinaParser.ExternalFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExternalFunctionBody(ctx.annotationAttachment().size(), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterFunctionDefinition(BallerinaParser.FunctionDefinitionContext ctx) {
if (isInErrorState) {
return;
}
int annotCount = ((BallerinaParser.CompilationUnitContext) ctx.parent.parent).annotationAttachment().size();
this.pkgBuilder.startFunctionDef(annotCount, false);
}
/**
* {@inheritDoc}
*/
@Override
public void exitFunctionDefinition(BallerinaParser.FunctionDefinitionContext ctx) {
if (isInErrorState) {
return;
}
String funcName = ctx.anyIdentifierName().getText();
boolean publicFunc = ctx.PUBLIC() != null;
boolean privateFunc = ctx.PRIVATE() != null;
boolean remoteFunc = ctx.REMOTE() != null;
boolean nativeFunc = ctx.functionDefinitionBody().externalFunctionBody() != null;
this.pkgBuilder.endFunctionDefinition(getCurrentPos(ctx), getWS(ctx), funcName,
getCurrentPos(ctx.anyIdentifierName()), publicFunc, remoteFunc,
nativeFunc, privateFunc, false);
}
@Override
public void enterExplicitAnonymousFunctionExpr(BallerinaParser.ExplicitAnonymousFunctionExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startLambdaFunctionDef(diagnosticSrc.pkgID);
}
@Override
public void exitExplicitAnonymousFunctionExpr(BallerinaParser.ExplicitAnonymousFunctionExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addLambdaFunctionDef(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterInferAnonymousFunctionExpr(BallerinaParser.InferAnonymousFunctionExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startVarList();
}
@Override
public void exitInferAnonymousFunctionExpression(BallerinaParser.InferAnonymousFunctionExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addArrowFunctionDef(getCurrentPos(ctx), getWS(ctx), diagnosticSrc.pkgID);
}
@Override
public void exitInferParamList(BallerinaParser.InferParamListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addWSForInferParamList(getWS(ctx));
}
@Override
public void exitInferParam(BallerinaParser.InferParamContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addVarWithoutType(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), false, 0);
}
/**
* {@inheritDoc}
*/
@Override
public void exitFunctionSignature(BallerinaParser.FunctionSignatureContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endFunctionSignature(getCurrentPos(ctx), getWS(ctx), ctx.formalParameterList() != null,
ctx.returnParameter() != null, ctx.formalParameterList() != null
&& ctx.formalParameterList().restParameter() != null);
}
/**
* {@inheritDoc}
*/
@Override
public void exitFiniteType(BallerinaParser.FiniteTypeContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endFiniteType(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitTypeDefinition(BallerinaParser.TypeDefinitionContext ctx) {
if (isInErrorState) {
return;
}
boolean publicObject = ctx.PUBLIC() != null;
this.pkgBuilder.endTypeDefinition(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), publicObject);
}
/**
* {@inheritDoc}
*/
@Override
public void enterObjectBody(BallerinaParser.ObjectBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startObjectType();
}
/**
* {@inheritDoc}
*/
@Override
public void exitObjectBody(BallerinaParser.ObjectBodyContext ctx) {
if (isInErrorState) {
return;
}
boolean isAnonymous = !(ctx.parent.parent instanceof BallerinaParser.FiniteTypeUnitContext)
|| (ctx.parent.parent instanceof BallerinaParser.FiniteTypeUnitContext
&& ctx.parent.parent.parent instanceof BallerinaParser.FiniteTypeContext
&& ctx.parent.parent.parent.getChildCount() > 1);
boolean isFieldAnalyseRequired =
(ctx.parent.parent instanceof BallerinaParser.GlobalVariableDefinitionContext ||
ctx.parent.parent instanceof BallerinaParser.ReturnParameterContext) ||
ctx.parent.parent.parent.parent instanceof BallerinaParser.TypeDefinitionContext;
boolean isAbstract = ((ObjectTypeNameLabelContext) ctx.parent).ABSTRACT() != null;
boolean isClient = ((ObjectTypeNameLabelContext) ctx.parent).CLIENT() != null;
this.pkgBuilder.addObjectType(getCurrentPos(ctx), getWS(ctx), isFieldAnalyseRequired, isAnonymous, isAbstract,
isClient, false);
}
@Override
public void exitTypeReference(BallerinaParser.TypeReferenceContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTypeReference(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitFieldDefinition(BallerinaParser.FieldDefinitionContext ctx) {
if (isInErrorState) {
return;
}
DiagnosticPos currentPos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
String name = ctx.Identifier().getText();
DiagnosticPos identifierPos = getCurrentPos(ctx.Identifier());
boolean exprAvailable = ctx.expression() != null;
boolean isOptional = ctx.QUESTION_MARK() != null;
this.pkgBuilder.addFieldVariable(currentPos, ws, name, identifierPos, exprAvailable,
ctx.annotationAttachment().size(), false, isOptional);
}
/**
* {@inheritDoc}
*/
@Override
public void exitObjectFieldDefinition(BallerinaParser.ObjectFieldDefinitionContext ctx) {
if (isInErrorState) {
return;
}
DiagnosticPos currentPos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
String name = ctx.Identifier().getText();
DiagnosticPos identifierPos = getCurrentPos(ctx.Identifier());
boolean exprAvailable = ctx.expression() != null;
int annotationCount = ctx.annotationAttachment().size();
boolean isPrivate = ctx.PRIVATE() != null;
boolean isPublic = ctx.PUBLIC() != null;
this.pkgBuilder.addObjectFieldVariable(currentPos, ws, name, identifierPos, exprAvailable, annotationCount,
isPrivate, isPublic);
}
/**
* {@inheritDoc}
*/
@Override
public void enterMethodDeclaration(BallerinaParser.MethodDeclarationContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startObjectFunctionDef();
}
/**
* {@inheritDoc}
*/
@Override
public void enterMethodDefinition(BallerinaParser.MethodDefinitionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startObjectFunctionDef();
}
/**
* {@inheritDoc}
*/
@Override
public void exitMethodDefinition(BallerinaParser.MethodDefinitionContext ctx) {
if (isInErrorState) {
return;
}
String funcName = ctx.anyIdentifierName().getText();
DiagnosticPos funcNamePos = getCurrentPos(ctx.anyIdentifierName());
boolean publicFunc = ctx.PUBLIC() != null;
boolean isPrivate = ctx.PRIVATE() != null;
boolean remoteFunc = ctx.REMOTE() != null;
boolean resourceFunc = ctx.RESOURCE() != null;
boolean markdownDocExists = ctx.documentationString() != null;
this.pkgBuilder.endObjectAttachedFunctionDef(getCurrentPos(ctx), getWS(ctx), funcName, funcNamePos, publicFunc,
isPrivate, remoteFunc, resourceFunc, false, markdownDocExists,
ctx.annotationAttachment().size());
}
/**
* {@inheritDoc}
*/
@Override
public void exitMethodDeclaration(BallerinaParser.MethodDeclarationContext ctx) {
if (isInErrorState) {
return;
}
String funcName = ctx.anyIdentifierName().getText();
DiagnosticPos funcNamePos = getCurrentPos(ctx.anyIdentifierName());
boolean isPublic = ctx.PUBLIC() != null;
boolean isPrivate = ctx.PRIVATE() != null;
boolean remoteFunc = ctx.REMOTE() != null;
boolean resourceFunc = ctx.RESOURCE() != null;
boolean markdownDocExists = ctx.documentationString() != null;
this.pkgBuilder.endObjectAttachedFunctionDef(getCurrentPos(ctx), getWS(ctx), funcName, funcNamePos, isPublic,
isPrivate, remoteFunc, resourceFunc, true, markdownDocExists,
ctx.annotationAttachment().size());
}
/**
* {@inheritDoc}
*/
@Override
public void enterAnnotationDefinition(BallerinaParser.AnnotationDefinitionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startAnnotationDef(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitAnnotationDefinition(BallerinaParser.AnnotationDefinitionContext ctx) {
if (isInErrorState) {
return;
}
boolean publicAnnotation = KEYWORD_PUBLIC.equals(ctx.getChild(0).getText());
boolean isTypeAttached = ctx.typeName() != null;
boolean isConst = ctx.CONST() != null;
this.pkgBuilder.endAnnotationDef(getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), publicAnnotation, isTypeAttached, isConst);
}
/**
* {@inheritDoc}
*/
@Override
public void exitConstantDefinition(BallerinaParser.ConstantDefinitionContext ctx) {
if (isInErrorState) {
return;
}
boolean isPublic = ctx.PUBLIC() != null;
boolean isTypeAvailable = ctx.typeName() != null;
this.pkgBuilder.addConstant(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), isPublic, isTypeAvailable);
}
@Override
public void exitConstDivMulModExpression(BallerinaParser.ConstDivMulModExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitConstAddSubExpression(BallerinaParser.ConstAddSubExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitConstGroupExpression(BallerinaParser.ConstGroupExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createGroupExpression(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitGlobalVariableDefinition(BallerinaParser.GlobalVariableDefinitionContext ctx) {
if (isInErrorState) {
return;
}
boolean isPublic = ctx.PUBLIC() != null;
boolean isFinal = ctx.FINAL() != null;
boolean isDeclaredWithVar = ctx.VAR() != null;
boolean isExpressionAvailable = ctx.expression() != null;
boolean isListenerVar = ctx.LISTENER() != null;
boolean isTypeNameProvided = ctx.typeName() != null;
this.pkgBuilder.addGlobalVariable(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), isPublic, isFinal,
isDeclaredWithVar, isExpressionAvailable, isListenerVar, isTypeNameProvided);
}
@Override
public void exitAttachmentPoint(BallerinaParser.AttachmentPointContext ctx) {
if (isInErrorState) {
return;
}
AttachPoint attachPoint;
if (ctx.dualAttachPoint() != null) {
if (ctx.dualAttachPoint().SOURCE() != null) {
attachPoint = AttachPoint.getAttachmentPoint(ctx.dualAttachPoint().dualAttachPointIdent().getText(),
true);
} else {
attachPoint = AttachPoint.getAttachmentPoint(ctx.getText(), false);
}
} else {
attachPoint = AttachPoint.getAttachmentPoint(
ctx.sourceOnlyAttachPoint().sourceOnlyAttachPointIdent().getText(), true);
}
this.pkgBuilder.addAttachPoint(attachPoint, getWS(ctx));
}
@Override
public void enterWorkerDeclaration(BallerinaParser.WorkerDeclarationContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startWorker(diagnosticSrc.pkgID);
}
@Override
public void exitWorkerDeclaration(BallerinaParser.WorkerDeclarationContext ctx) {
if (isInErrorState) {
return;
}
String workerName = null;
DiagnosticPos workerNamePos = null;
if (ctx.workerDefinition() != null) {
workerName = escapeQuotedIdentifier(ctx.workerDefinition().Identifier().getText());
workerNamePos = getCurrentPos(ctx.workerDefinition().Identifier());
}
boolean retParamsAvail = ctx.workerDefinition().returnParameter() != null;
int numAnnotations = ctx.annotationAttachment().size();
this.pkgBuilder.addWorker(
getCurrentPos(ctx), getWS(ctx), workerName, workerNamePos, retParamsAvail, numAnnotations);
}
/**
* {@inheritDoc}
*/
@Override
public void exitWorkerDefinition(BallerinaParser.WorkerDefinitionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.attachWorkerWS(getWS(ctx));
}
@Override
public void exitArrayTypeNameLabel(BallerinaParser.ArrayTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
int index = 1;
int dimensions = 0;
List<Integer> sizes = new ArrayList<>();
List<ParseTree> children = ctx.children;
// Walk the child tokens and record one size entry per dimension:
// '[]' is an unsealed array, '[*]' an open sealed array, '[n]' a fixed size n.
while (index < children.size()) {
if (children.get(index).getText().equals("[")) {
if (children.get(index + 1).getText().equals("]")) {
sizes.add(UNSEALED_ARRAY_INDICATOR);
index += 2;
} else if (children.get(index + 1).getText().equals(OPEN_SEALED_ARRAY)) {
sizes.add(OPEN_SEALED_ARRAY_INDICATOR);
index += 1;
} else {
sizes.add(Integer.parseInt(children.get(index + 1).getText()));
index += 1;
}
dimensions++;
} else {
index++;
}
}
// The sizes were collected in source order; reverse them into the order addArrayType expects.
Collections.reverse(sizes);
this.pkgBuilder.addArrayType(
getCurrentPos(ctx), getWS(ctx), dimensions, sizes.stream().mapToInt(val -> val).toArray());
}
@Override
public void exitUnionTypeNameLabel(BallerinaParser.UnionTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addUnionType(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitTupleTypeNameLabel(BallerinaParser.TupleTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTupleType(getCurrentPos(ctx), getWS(ctx), ctx.tupleTypeDescriptor().typeName().size(),
ctx.tupleTypeDescriptor().tupleRestDescriptor() != null);
}
@Override
public void exitNullableTypeNameLabel(BallerinaParser.NullableTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.markTypeNodeAsNullable(getWS(ctx));
}
@Override
public void exitGroupTypeNameLabel(BallerinaParser.GroupTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.markTypeNodeAsGrouped(getWS(ctx));
}
@Override
public void enterInclusiveRecordTypeDescriptor(BallerinaParser.InclusiveRecordTypeDescriptorContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startRecordType();
}
@Override
public void exitInclusiveRecordTypeDescriptor(BallerinaParser.InclusiveRecordTypeDescriptorContext ctx) {
if (isInErrorState) {
return;
}
boolean isAnonymous = !(ctx.parent.parent instanceof BallerinaParser.FiniteTypeUnitContext);
boolean isFieldAnalyseRequired =
(ctx.parent.parent instanceof BallerinaParser.GlobalVariableDefinitionContext ||
ctx.parent.parent instanceof BallerinaParser.ReturnParameterContext) ||
ctx.parent.parent.parent.parent instanceof BallerinaParser.TypeDefinitionContext;
this.pkgBuilder.addRecordType(getCurrentPos(ctx), getWS(ctx), isFieldAnalyseRequired, isAnonymous, false,
false);
}
@Override
public void enterExclusiveRecordTypeDescriptor(BallerinaParser.ExclusiveRecordTypeDescriptorContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startRecordType();
}
@Override
public void exitExclusiveRecordTypeDescriptor(BallerinaParser.ExclusiveRecordTypeDescriptorContext ctx) {
if (isInErrorState) {
return;
}
boolean isAnonymous = !(ctx.parent.parent instanceof BallerinaParser.FiniteTypeUnitContext);
boolean isFieldAnalyseRequired =
(ctx.parent.parent instanceof BallerinaParser.GlobalVariableDefinitionContext ||
ctx.parent.parent instanceof BallerinaParser.ReturnParameterContext) ||
ctx.parent.parent.parent.parent instanceof BallerinaParser.TypeDefinitionContext;
boolean hasRestField = ctx.recordRestFieldDefinition() != null;
this.pkgBuilder.addRecordType(getCurrentPos(ctx), getWS(ctx), isFieldAnalyseRequired, isAnonymous,
hasRestField, true);
}
@Override
public void exitSimpleTypeName(BallerinaParser.SimpleTypeNameContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.referenceTypeName() != null || ctx.valueTypeName() != null) {
return;
}
this.pkgBuilder.addValueType(getCurrentPos(ctx), getWS(ctx), ctx.getChild(0).getText());
}
@Override
public void exitUserDefineTypeName(BallerinaParser.UserDefineTypeNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addUserDefineType(getWS(ctx));
}
@Override
public void exitValueTypeName(BallerinaParser.ValueTypeNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addValueType(getCurrentPos(ctx), getWS(ctx), ctx.getText());
}
@Override
public void exitBuiltInReferenceTypeName(BallerinaParser.BuiltInReferenceTypeNameContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.functionTypeName() != null) {
return;
}
if (ctx.errorTypeName() != null) {
return;
}
String typeName = ctx.getChild(0).getText();
DiagnosticPos pos = getCurrentPos(ctx);
if (ctx.typeName() != null) {
this.pkgBuilder.addConstraintTypeWithTypeName(pos, getWS(ctx), typeName);
} else {
this.pkgBuilder.addBuiltInReferenceType(pos, getWS(ctx), typeName);
}
}
@Override
public void exitErrorTypeName(BallerinaParser.ErrorTypeNameContext ctx) {
if (isInErrorState) {
return;
}
boolean reasonTypeExists = !ctx.typeName().isEmpty();
boolean detailsTypeExists = ctx.typeName().size() > 1;
boolean isAnonymous = !(ctx.parent.parent.parent.parent.parent.parent
instanceof BallerinaParser.FiniteTypeContext) && reasonTypeExists;
this.pkgBuilder.addErrorType(getCurrentPos(ctx), getWS(ctx), reasonTypeExists, detailsTypeExists, isAnonymous);
}
@Override
public void exitFunctionTypeName(BallerinaParser.FunctionTypeNameContext ctx) {
if (isInErrorState) {
return;
}
boolean paramsAvail = false, retParamAvail = false, restParamAvail = false;
if (ctx.parameterList() != null) {
paramsAvail = ctx.parameterList().parameter().size() > 0;
if (ctx.parameterList().restParameter() != null) {
restParamAvail = true;
}
} else if (ctx.parameterTypeNameList() != null) {
paramsAvail = ctx.parameterTypeNameList().parameterTypeName().size() > 0;
if (ctx.parameterTypeNameList().restParameterTypeName() != null) {
restParamAvail = true;
}
}
if (ctx.returnParameter() != null) {
retParamAvail = true;
}
this.pkgBuilder.addFunctionType(getCurrentPos(ctx), getWS(ctx), paramsAvail, restParamAvail, retParamAvail);
}
/**
* {@inheritDoc}
*/
@Override
public void enterAnnotationAttachment(BallerinaParser.AnnotationAttachmentContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startAnnotationAttachment(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitAnnotationAttachment(BallerinaParser.AnnotationAttachmentContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.setAnnotationAttachmentName(getWS(ctx), ctx.recordLiteral() != null,
getCurrentPos(ctx), false);
}
@Override
public void exitErrorBindingPattern(BallerinaParser.ErrorBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.typeName() != null) {
if (ctx.errorFieldBindingPatterns().errorRestBindingPattern() != null) {
String restIdName = ctx.errorFieldBindingPatterns().errorRestBindingPattern().Identifier().getText();
DiagnosticPos restPos = getCurrentPos(ctx.errorFieldBindingPatterns().errorRestBindingPattern());
this.pkgBuilder.addErrorVariable(getCurrentPos(ctx), getWS(ctx), restIdName, restPos);
} else {
this.pkgBuilder.addErrorVariable(getCurrentPos(ctx), getWS(ctx), null, null);
}
return;
}
String reasonIdentifier = ctx.Identifier().getText();
DiagnosticPos currentPos = getCurrentPos(ctx);
String restIdentifier = null;
DiagnosticPos restParamPos = null;
if (ctx.errorRestBindingPattern() != null) {
restIdentifier = ctx.errorRestBindingPattern().Identifier().getText();
restParamPos = getCurrentPos(ctx.errorRestBindingPattern());
}
this.pkgBuilder.addErrorVariable(currentPos, getWS(ctx), reasonIdentifier, restIdentifier, false, false,
restParamPos);
}
@Override
public void enterErrorBindingPattern(BallerinaParser.ErrorBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startErrorBindingNode();
}
@Override
public void enterErrorMatchPattern(BallerinaParser.ErrorMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startErrorBindingNode();
}
@Override
public void exitSimpleMatchPattern(BallerinaParser.SimpleMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endSimpleMatchPattern(getWS(ctx));
}
@Override
public void exitErrorArgListMatchPattern(BallerinaParser.ErrorArgListMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
String restIdentifier = null;
DiagnosticPos restParamPos = null;
if (ctx.restMatchPattern() != null) {
restIdentifier = ctx.restMatchPattern().Identifier().getText();
restParamPos = getCurrentPos(ctx.restMatchPattern());
}
String reasonIdentifier = null;
boolean reasonVar = false;
boolean constReasonMatchPattern = false;
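// The reason position may hold either an identifier (a binding variable,
// optionally declared with `var`) or a quoted string constant.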
if (ctx.simpleMatchPattern() != null) {
reasonVar = ctx.simpleMatchPattern().VAR() != null;
if (ctx.simpleMatchPattern().Identifier() != null) {
reasonIdentifier = ctx.simpleMatchPattern().Identifier().getText();
} else {
reasonIdentifier = ctx.simpleMatchPattern().QuotedStringLiteral().getText();
constReasonMatchPattern = true;
}
}
this.pkgBuilder.addErrorVariable(getCurrentPos(ctx), getWS(ctx), reasonIdentifier,
restIdentifier, reasonVar, constReasonMatchPattern, restParamPos);
}
@Override
public void exitErrorMatchPattern(BallerinaParser.ErrorMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean isIndirectErrorMatchPattern = ctx.typeName() != null;
this.pkgBuilder.endErrorMatchPattern(getWS(ctx), isIndirectErrorMatchPattern);
}
@Override
public void exitErrorDetailBindingPattern(BallerinaParser.ErrorDetailBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
String bindingVarName = null;
if (ctx.bindingPattern() != null && ctx.bindingPattern().Identifier() != null) {
bindingVarName = ctx.bindingPattern().Identifier().getText();
}
this.pkgBuilder.addErrorDetailBinding(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
bindingVarName);
}
@Override
public void exitErrorRefBindingPattern(BallerinaParser.ErrorRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
int numNamedArgs = ctx.errorNamedArgRefPattern().size();
boolean reasonRefAvailable = ctx.variableReference() != null;
boolean restPatternAvailable = ctx.errorRefRestPattern() != null;
boolean indirectErrorRefPattern = ctx.typeName() != null;
this.pkgBuilder.addErrorVariableReference(getCurrentPos(ctx), getWS(ctx),
numNamedArgs, reasonRefAvailable, restPatternAvailable, indirectErrorRefPattern);
}
@Override
public void exitErrorNamedArgRefPattern(BallerinaParser.ErrorNamedArgRefPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addNamedArgument(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText());
}
@Override
public void exitListBindingPattern(BallerinaParser.ListBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean restBindingAvailable = ctx.restBindingPattern() != null;
this.pkgBuilder.addTupleVariable(getCurrentPos(ctx), getWS(ctx), ctx.bindingPattern().size(),
restBindingAvailable);
}
@Override
public void exitListRefBindingPattern(BallerinaParser.ListRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean restPatternAvailable = ctx.listRefRestPattern() != null;
this.pkgBuilder.addTupleVariableReference(getCurrentPos(ctx), getWS(ctx), ctx.bindingRefPattern().size(),
restPatternAvailable);
}
@Override
public void enterRecordBindingPattern(BallerinaParser.RecordBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startRecordVariableList();
}
@Override
public void exitRecordBindingPattern(BallerinaParser.RecordBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean hasRestBindingPattern = ctx.entryBindingPattern().restBindingPattern() != null;
this.pkgBuilder.addRecordVariable(getCurrentPos(ctx), getWS(ctx), hasRestBindingPattern);
}
@Override
public void enterRecordRefBindingPattern(BallerinaParser.RecordRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startRecordVariableReferenceList();
}
@Override
public void exitRecordRefBindingPattern(BallerinaParser.RecordRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean hasRestBindingPattern = ctx.entryRefBindingPattern().restRefBindingPattern() != null;
this.pkgBuilder.addRecordVariableReference(getCurrentPos(ctx), getWS(ctx), hasRestBindingPattern);
}
@Override
public void exitBindingPattern(BallerinaParser.BindingPatternContext ctx) {
if (isInErrorState) {
return;
}
if ((ctx.Identifier() != null) && ((ctx.parent instanceof BallerinaParser.ListBindingPatternContext)
|| (ctx.parent instanceof BallerinaParser.FieldBindingPatternContext)
|| (ctx.parent instanceof BallerinaParser.MatchPatternClauseContext))) {
this.pkgBuilder.addBindingPatternMemberVariable(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()));
} else if (ctx.Identifier() != null) {
this.pkgBuilder.addBindingPatternNameWhitespace(getWS(ctx));
}
}
@Override
public void exitFieldBindingPattern(BallerinaParser.FieldBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addFieldBindingMemberVar(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()),
ctx.bindingPattern() != null);
}
@Override
public void exitFieldRefBindingPattern(BallerinaParser.FieldRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addFieldRefBindingMemberVar(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
ctx.bindingRefPattern() != null);
}
@Override
public void exitRestBindingPattern(BallerinaParser.RestBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier() != null) {
this.pkgBuilder.addBindingPatternMemberVariable(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()));
}
}
@Override
public void exitVariableDefinitionStatement(BallerinaParser.VariableDefinitionStatementContext ctx) {
if (isInErrorState) {
return;
}
boolean isFinal = ctx.FINAL() != null;
boolean isDeclaredWithVar = ctx.VAR() != null;
boolean isExpressionAvailable = ctx.expression() != null;
if (ctx.Identifier() != null) {
this.pkgBuilder.addSimpleVariableDefStatement(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()),
isFinal, isExpressionAvailable, isDeclaredWithVar);
} else if (ctx.bindingPattern().Identifier() != null) {
this.pkgBuilder.addSimpleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
ctx.bindingPattern().Identifier().getText(),
getCurrentPos(ctx.bindingPattern().Identifier()),
isFinal, isExpressionAvailable, isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().recordBindingPattern() != null) {
this.pkgBuilder.addRecordVariableDefStatement(getCurrentPos(ctx), getWS(ctx), isFinal, isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().errorBindingPattern() != null) {
this.pkgBuilder.addErrorVariableDefStatement(getCurrentPos(ctx), getWS(ctx), isFinal, isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().listBindingPattern() != null) {
this.pkgBuilder.addTupleVariableDefStatement(getCurrentPos(ctx), getWS(ctx), isFinal, isDeclaredWithVar);
}
}
@Override
public void enterRecordLiteral(BallerinaParser.RecordLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startMapStructLiteral();
}
@Override
public void exitRecordLiteral(BallerinaParser.RecordLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addMapStructLiteral(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitRecordField(BallerinaParser.RecordFieldContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier() == null) {
this.pkgBuilder.addKeyValueRecordField(getWS(ctx), ctx.recordKey().LEFT_BRACKET() != null);
} else {
DiagnosticPos pos = getCurrentPos(ctx);
this.pkgBuilder.addNameReference(pos, getWS(ctx), null, ctx.Identifier().getText());
this.pkgBuilder.createBLangRecordVarRefNameField(pos, getWS(ctx));
this.pkgBuilder.addIdentifierRecordField();
}
}
@Override
public void exitRecordKey(BallerinaParser.RecordKeyContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier() != null) {
DiagnosticPos pos = getCurrentPos(ctx);
this.pkgBuilder.addNameReference(pos, getWS(ctx), null, ctx.Identifier().getText());
this.pkgBuilder.createSimpleVariableReference(pos, getWS(ctx));
} else if (ctx.LEFT_BRACKET() != null) {
this.pkgBuilder.addRecordKeyWS(getWS(ctx));
}
}
@Override
public void enterTableLiteral(BallerinaParser.TableLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startTableLiteral();
}
@Override
public void exitTableColumnDefinition(BallerinaParser.TableColumnDefinitionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endTableColumnDefinition(getWS(ctx));
}
@Override
public void exitTableColumn(BallerinaParser.TableColumnContext ctx) {
if (isInErrorState) {
return;
}
String columnName;
int childCount = ctx.getChildCount();
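// A column declared with two tokens is expected to be the `key columnName`
// form; any other leading token in that position is reported as an error.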
if (childCount == 2) {
boolean keyColumn = KEYWORD_KEY.equals(ctx.getChild(0).getText());
if (keyColumn) {
columnName = escapeQuotedIdentifier(ctx.getChild(1).getText());
this.pkgBuilder.addTableColumn(columnName, getCurrentPos(ctx), getWS(ctx));
this.pkgBuilder.markPrimaryKeyColumn(columnName);
} else {
DiagnosticPos pos = getCurrentPos(ctx);
dlog.error(pos, DiagnosticCode.TABLE_KEY_EXPECTED);
}
} else {
columnName = escapeQuotedIdentifier(ctx.getChild(0).getText());
this.pkgBuilder.addTableColumn(columnName, getCurrentPos(ctx), getWS(ctx));
}
}
@Override
public void exitTableDataArray(BallerinaParser.TableDataArrayContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endTableDataArray(getWS(ctx));
}
@Override
public void exitTableDataList(BallerinaParser.TableDataListContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.expressionList() != null) {
this.pkgBuilder.endTableDataRow(getWS(ctx));
}
}
@Override
public void exitTableData(BallerinaParser.TableDataContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endTableDataList(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitTableLiteral(BallerinaParser.TableLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTableLiteral(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterStreamConstructorExpr(BallerinaParser.StreamConstructorExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startStreamConstructor(getCurrentPos(ctx), diagnosticSrc.pkgID);
}
@Override
public void exitStreamConstructorExpr(BallerinaParser.StreamConstructorExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endStreamConstructor(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitListConstructorExpr(BallerinaParser.ListConstructorExprContext ctx) {
if (isInErrorState) {
return;
}
boolean argsAvailable = ctx.expressionList() != null;
this.pkgBuilder.addListConstructorExpression(getCurrentPos(ctx), getWS(ctx), argsAvailable);
}
@Override
public void enterLetExpression(BallerinaParser.LetExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startLetVarList();
}
@Override
public void enterLetExpr(BallerinaParser.LetExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startLetVarList();
}
@Override
public void exitLetExpression(BallerinaParser.LetExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addLetExpression(getCurrentPos(ctx));
}
@Override
public void exitLetVarDecl(BallerinaParser.LetVarDeclContext ctx) {
if (isInErrorState) {
return;
}
boolean isDeclaredWithVar = ctx.VAR() != null;
boolean isExpressionAvailable = ctx.expression() != null;
if (ctx.bindingPattern().Identifier() != null) {
this.pkgBuilder.addLetVariableDecl(getCurrentPos(ctx), getWS(ctx),
ctx.bindingPattern().Identifier().getText(),
getCurrentPos(ctx.bindingPattern().Identifier()),
isExpressionAvailable, isDeclaredWithVar);
}
}
@Override
public void exitTypeInitExpr(BallerinaParser.TypeInitExprContext ctx) {
if (isInErrorState) {
return;
}
String initName = ctx.NEW().getText();
boolean typeAvailable = ctx.userDefineTypeName() != null;
boolean argsAvailable = ctx.invocationArgList() != null;
this.pkgBuilder.addTypeInitExpression(getCurrentPos(ctx), getWS(ctx), initName, typeAvailable, argsAvailable);
}
/**
* {@inheritDoc}
*/
@Override
public void exitAssignmentStatement(BallerinaParser.AssignmentStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addAssignmentStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitListDestructuringStatement(BallerinaParser.ListDestructuringStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTupleDestructuringStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitRecordDestructuringStatement(BallerinaParser.RecordDestructuringStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRecordDestructuringStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitErrorDestructuringStatement(BallerinaParser.ErrorDestructuringStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addErrorDestructuringStatement(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitCompoundAssignmentStatement(BallerinaParser.CompoundAssignmentStatementContext ctx) {
if (isInErrorState) {
return;
}
String compoundOperatorText = ctx.compoundOperator().getText();
String operator = compoundOperatorText.substring(0, compoundOperatorText.length() - 1);
this.pkgBuilder.addCompoundAssignmentStatement(getCurrentPos(ctx), getWS(ctx), operator);
}
/**
* {@inheritDoc}
*/
@Override
public void exitCompoundOperator(BallerinaParser.CompoundOperatorContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addCompoundOperator(getWS(ctx));
}
@Override
public void enterVariableReferenceList(BallerinaParser.VariableReferenceListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExprNodeList();
}
@Override
public void exitVariableReferenceList(BallerinaParser.VariableReferenceListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExprNodeList(getWS(ctx), ctx.getChildCount() / 2 + 1);
}
/**
* {@inheritDoc}
*/
@Override
public void enterIfElseStatement(BallerinaParser.IfElseStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startIfElseNode(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitIfElseStatement(BallerinaParser.IfElseStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endIfElseNode(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitIfClause(BallerinaParser.IfClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addIfBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterElseIfClause(BallerinaParser.ElseIfClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startIfElseNode(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitElseIfClause(BallerinaParser.ElseIfClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addElseIfBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterElseClause(BallerinaParser.ElseClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startBlock();
}
/**
* {@inheritDoc}
*/
@Override
public void exitElseClause(BallerinaParser.ElseClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addElseBlock(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterMatchStatement(BallerinaParser.MatchStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createMatchNode(getCurrentPos(ctx));
}
@Override
public void exitMatchStatement(BallerinaParser.MatchStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.completeMatchNode(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterMatchPatternClause(BallerinaParser.MatchPatternClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startMatchStmtPattern();
}
@Override
public void exitMatchPatternClause(BallerinaParser.MatchPatternClauseContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.bindingPattern() != null || ctx.errorMatchPattern() != null) {
boolean isTypeGuardPresent = ctx.IF() != null;
this.pkgBuilder.addMatchStmtStructuredBindingPattern(getCurrentPos(ctx), getWS(ctx), isTypeGuardPresent);
return;
}
this.pkgBuilder.addMatchStmtStaticBindingPattern(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterForeachStatement(BallerinaParser.ForeachStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startForeachStatement();
}
@Override
public void exitForeachStatement(BallerinaParser.ForeachStatementContext ctx) {
if (isInErrorState) {
return;
}
boolean isDeclaredWithVar = ctx.VAR() != null;
if (ctx.bindingPattern().Identifier() != null) {
String identifier = ctx.bindingPattern().Identifier().getText();
DiagnosticPos identifierPos = getCurrentPos(ctx.bindingPattern().Identifier());
this.pkgBuilder.addForeachStatementWithSimpleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
identifier, identifierPos,
isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().recordBindingPattern() != null) {
this.pkgBuilder.addForeachStatementWithRecordVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().errorBindingPattern() != null) {
this.pkgBuilder.addForeachStatementWithErrorVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
} else {
this.pkgBuilder.addForeachStatementWithTupleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
}
}
@Override
public void exitIntRangeExpression(BallerinaParser.IntRangeExpressionContext ctx) {
if (isInErrorState) {
return;
}
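// The parenthesis tokens appear to mark exclusive bounds on either side of
// the range, and a missing second expression presumably means the range has
// no explicit upper bound.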
this.pkgBuilder.addIntRangeExpression(getCurrentPos(ctx), getWS(ctx),
ctx.LEFT_PARENTHESIS() == null, ctx.RIGHT_PARENTHESIS() == null,
ctx.expression(1) == null);
}
/**
* {@inheritDoc}
*/
@Override
public void enterWhileStatement(BallerinaParser.WhileStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startWhileStmt();
}
/**
* {@inheritDoc}
*/
@Override
public void exitWhileStatement(BallerinaParser.WhileStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addWhileStmt(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitContinueStatement(BallerinaParser.ContinueStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addContinueStatement(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitBreakStatement(BallerinaParser.BreakStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addBreakStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterForkJoinStatement(BallerinaParser.ForkJoinStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startForkJoinStmt();
}
@Override
public void exitForkJoinStatement(BallerinaParser.ForkJoinStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addForkJoinStmt(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterTryCatchStatement(BallerinaParser.TryCatchStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startTryCatchFinallyStmt();
}
@Override
public void exitTryCatchStatement(BallerinaParser.TryCatchStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTryCatchFinallyStmt(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterCatchClauses(BallerinaParser.CatchClausesContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTryClause(getCurrentPos(ctx));
}
@Override
public void enterCatchClause(BallerinaParser.CatchClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startCatchClause();
}
@Override
public void exitCatchClause(BallerinaParser.CatchClauseContext ctx) {
if (isInErrorState) {
return;
}
String paramName = ctx.Identifier().getText();
this.pkgBuilder.addCatchClause(getCurrentPos(ctx), getWS(ctx), paramName);
}
@Override
public void enterFinallyClause(BallerinaParser.FinallyClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startFinallyBlock();
}
@Override
public void exitFinallyClause(BallerinaParser.FinallyClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addFinallyBlock(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitThrowStatement(BallerinaParser.ThrowStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addThrowStmt(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitPanicStatement(BallerinaParser.PanicStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addPanicStmt(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitReturnStatement(BallerinaParser.ReturnStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addReturnStatement(this.getCurrentPos(ctx), getWS(ctx), ctx.expression() != null);
}
@Override
public void exitWorkerReceiveExpression(BallerinaParser.WorkerReceiveExpressionContext ctx) {
if (isInErrorState) {
return;
}
String workerName = ctx.peerWorker().DEFAULT() != null ?
ctx.peerWorker().DEFAULT().getText() : ctx.peerWorker().workerName().getText();
this.pkgBuilder.addWorkerReceiveExpr(getCurrentPos(ctx), getWS(ctx), workerName, ctx.expression() != null);
}
@Override
public void exitFlushWorker(BallerinaParser.FlushWorkerContext ctx) {
if (isInErrorState) {
return;
}
String workerName = ctx.Identifier() != null ? ctx.Identifier().getText() : null;
this.pkgBuilder.addWorkerFlushExpr(getCurrentPos(ctx), getWS(ctx), workerName);
}
@Override
public void exitWorkerSendAsyncStatement(BallerinaParser.WorkerSendAsyncStatementContext ctx) {
if (isInErrorState) {
return;
}
String workerName = ctx.peerWorker().DEFAULT() != null ?
ctx.peerWorker().DEFAULT().getText() : ctx.peerWorker().workerName().getText();
this.pkgBuilder.addWorkerSendStmt(getCurrentPos(ctx), getWS(ctx), workerName, ctx.expression().size() > 1);
}
@Override
public void exitWorkerSendSyncExpression(BallerinaParser.WorkerSendSyncExpressionContext ctx) {
if (isInErrorState) {
return;
}
String workerName = ctx.peerWorker().DEFAULT() != null ?
ctx.peerWorker().DEFAULT().getText() : ctx.peerWorker().workerName().getText();
this.pkgBuilder.addWorkerSendSyncExpr(getCurrentPos(ctx), getWS(ctx), workerName);
}
@Override
public void exitWaitExpression(BallerinaParser.WaitExpressionContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.waitForCollection() != null) {
this.pkgBuilder.handleWaitForAll(getCurrentPos(ctx), getWS(ctx));
} else {
this.pkgBuilder.handleWait(getCurrentPos(ctx), getWS(ctx));
}
}
@Override
public void enterWaitForCollection(BallerinaParser.WaitForCollectionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startWaitForAll();
}
@Override
public void exitWaitKeyValue(BallerinaParser.WaitKeyValueContext ctx) {
if (isInErrorState) {
return;
}
boolean containsExpr = ctx.expression() != null;
this.pkgBuilder.addKeyValueToWaitForAll(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
containsExpr);
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlAttribVariableReference(BallerinaParser.XmlAttribVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
boolean isSingleAttrRef = ctx.xmlAttrib().expression() != null;
this.pkgBuilder.createXmlAttributesRefExpr(getCurrentPos(ctx), getWS(ctx), isSingleAttrRef);
}
@Override
public void exitSimpleVariableReference(BallerinaParser.SimpleVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createSimpleVariableReference(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitInvocation(BallerinaParser.InvocationContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addInvocationWS(getWS(ctx));
}
@Override
public void exitStringFunctionInvocationReference(BallerinaParser.StringFunctionInvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
TerminalNode node = ctx.QuotedStringLiteral();
DiagnosticPos pos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
String actualText = node.getText();
actualText = actualText.substring(1, actualText.length() - 1);
actualText = StringEscapeUtils.unescapeJava(actualText);
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.STRING, actualText, node.getText());
boolean argsAvailable = ctx.invocation().invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = ctx.invocation().anyIdentifierName();
String invocation = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(ctx), getWS(ctx), invocation, argsAvailable,
getCurrentPos(identifierContext));
}
@Override
public void exitGroupStringFunctionInvocationReference(GroupStringFunctionInvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
TerminalNode node = ctx.QuotedStringLiteral();
DiagnosticPos pos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
String actualText = node.getText();
actualText = actualText.substring(1, actualText.length() - 1);
actualText = StringEscapeUtils.unescapeJava(actualText);
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.STRING, actualText, node.getText());
InvocationContext invocation = ctx.invocation();
boolean argsAvailable = invocation.invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = invocation.anyIdentifierName();
String invocationText = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(invocation), getWS(invocation), invocationText,
argsAvailable, getCurrentPos(identifierContext));
this.pkgBuilder.createGroupExpression(getCurrentPos(node), getWS(ctx));
}
@Override
public void exitFunctionInvocation(BallerinaParser.FunctionInvocationContext ctx) {
if (isInErrorState) {
return;
}
boolean argsAvailable = ctx.invocationArgList() != null;
this.pkgBuilder.createFunctionInvocation(getCurrentPos(ctx), getWS(ctx), argsAvailable);
}
@Override
public void exitFieldVariableReference(BallerinaParser.FieldVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
createFieldBasedAccessNode(ctx, ctx.field());
}
@Override
public void exitGroupFieldVariableReference(BallerinaParser.GroupFieldVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
FieldContext field = ctx.field();
VariableReferenceContext groupExpression = ctx.variableReference();
createFieldBasedAccessNode(field, field);
this.pkgBuilder.createGroupExpression(getCurrentPos(groupExpression), getWS(groupExpression));
}
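/**
 * Builds a field-based access node for either a named field (FieldKind.SINGLE)
 * or the `*` wildcard form (FieldKind.ALL), also recording whether the
 * optional field access operator was used.
 */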
private void createFieldBasedAccessNode(ParserRuleContext ctx, FieldContext field) {
String fieldName;
DiagnosticPos fieldNamePos;
FieldKind fieldType;
if (field.Identifier() != null) {
fieldName = field.Identifier().getText();
fieldNamePos = getCurrentPos(field);
fieldType = FieldKind.SINGLE;
} else {
fieldName = field.MUL().getText();
fieldNamePos = getCurrentPos(field);
fieldType = FieldKind.ALL;
}
this.pkgBuilder.createFieldBasedAccessNode(getCurrentPos(ctx), getWS(ctx), fieldName, fieldNamePos,
fieldType, field.OPTIONAL_FIELD_ACCESS() != null);
}
@Override
public void exitMapArrayVariableReference(BallerinaParser.MapArrayVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createIndexBasedAccessNode(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitGroupMapArrayVariableReference(BallerinaParser.GroupMapArrayVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
IndexContext index = ctx.index();
VariableReferenceContext groupExpression = ctx.variableReference();
this.pkgBuilder.createIndexBasedAccessNode(getCurrentPos(index), getWS(index));
this.pkgBuilder.createGroupExpression(getCurrentPos(groupExpression), getWS(groupExpression));
}
@Override
public void exitReservedWord(BallerinaParser.ReservedWordContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startInvocationNode(getWS(ctx));
}
@Override
public void exitAnyIdentifierName(BallerinaParser.AnyIdentifierNameContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.reservedWord() == null) {
this.pkgBuilder.startInvocationNode(getWS(ctx));
}
}
@Override
public void exitInvocationReference(BallerinaParser.InvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
boolean argsAvailable = ctx.invocation().invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = ctx.invocation().anyIdentifierName();
String invocation = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(ctx), getWS(ctx), invocation, argsAvailable,
getCurrentPos(identifierContext));
}
@Override
public void exitGroupInvocationReference(BallerinaParser.GroupInvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
InvocationContext invocation = ctx.invocation();
VariableReferenceContext groupExpression = ctx.variableReference();
boolean argsAvailable = invocation.invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = invocation.anyIdentifierName();
String invocationText = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(invocation), getWS(invocation),
invocationText, argsAvailable, getCurrentPos(identifierContext));
this.pkgBuilder.createGroupExpression(getCurrentPos(groupExpression), getWS(groupExpression));
}
@Override
public void exitTypeDescExprInvocationReference(BallerinaParser.TypeDescExprInvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
boolean argsAvailable = ctx.invocation().invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = ctx.invocation().anyIdentifierName();
String invocation = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(ctx), getWS(ctx), invocation, argsAvailable,
getCurrentPos(identifierContext));
}
/**
* {@inheritDoc}
*/
@Override
public void enterInvocationArgList(BallerinaParser.InvocationArgListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExprNodeList();
}
/**
* {@inheritDoc}
*/
@Override
public void exitInvocationArgList(BallerinaParser.InvocationArgListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExprNodeList(getWS(ctx), ctx.getChildCount() / 2 + 1);
}
@Override
public void enterExpressionList(BallerinaParser.ExpressionListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExprNodeList();
}
@Override
public void exitExpressionList(BallerinaParser.ExpressionListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExprNodeList(getWS(ctx), ctx.getChildCount() / 2 + 1);
}
@Override
public void exitExpressionStmt(BallerinaParser.ExpressionStmtContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addExpressionStmt(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterTransactionStatement(BallerinaParser.TransactionStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startTransactionStmt();
}
/**
* {@inheritDoc}
*/
@Override
public void exitTransactionStatement(BallerinaParser.TransactionStatementContext ctx) {
if (isInErrorState) {
return;
}
DiagnosticPos pos = getCurrentPos(ctx);
this.pkgBuilder.endTransactionStmt(pos, getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitTransactionClause(BallerinaParser.TransactionClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTransactionBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitTransactionPropertyInitStatementList(
BallerinaParser.TransactionPropertyInitStatementListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endTransactionPropertyInitStatementList(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterLockStatement(BallerinaParser.LockStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startLockStmt();
}
/**
* {@inheritDoc}
*/
@Override
public void exitLockStatement(BallerinaParser.LockStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addLockStmt(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterOnretryClause(BallerinaParser.OnretryClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startOnretryBlock();
}
/**
* {@inheritDoc}
*/
@Override
public void exitOnretryClause(BallerinaParser.OnretryClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addOnretryBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterCommittedClause(BallerinaParser.CommittedClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startCommittedBlock();
}
/**
* {@inheritDoc}
*/
@Override
public void exitCommittedClause(BallerinaParser.CommittedClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endCommittedBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterAbortedClause(BallerinaParser.AbortedClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startAbortedBlock();
}
/**
* {@inheritDoc}
*/
@Override
public void exitAbortedClause(BallerinaParser.AbortedClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endAbortedBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitAbortStatement(BallerinaParser.AbortStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addAbortStatement(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitRetryStatement(BallerinaParser.RetryStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRetryStatement(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitRetriesStatement(BallerinaParser.RetriesStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRetryCountExpression(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterNamespaceDeclaration(BallerinaParser.NamespaceDeclarationContext ctx) {
}
@Override
public void exitNamespaceDeclaration(BallerinaParser.NamespaceDeclarationContext ctx) {
if (isInErrorState) {
return;
}
boolean isTopLevel = ctx.parent instanceof BallerinaParser.CompilationUnitContext;
String namespaceUri = ctx.QuotedStringLiteral().getText();
DiagnosticPos pos = getCurrentPos(ctx);
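// Strip the surrounding quotes from the namespace URI literal and unescape
// it before registering the XMLNS declaration.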
namespaceUri = namespaceUri.substring(1, namespaceUri.length() - 1);
namespaceUri = StringEscapeUtils.unescapeJava(namespaceUri);
String prefix = (ctx.Identifier() != null) ? ctx.Identifier().getText() : null;
DiagnosticPos prefixPos = (ctx.Identifier() != null) ? getCurrentPos(ctx.Identifier()) : null;
this.pkgBuilder.addXMLNSDeclaration(pos, getWS(ctx), namespaceUri, prefix, prefixPos, isTopLevel);
}
@Override
public void exitBinaryDivMulModExpression(BallerinaParser.BinaryDivMulModExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBinaryOrExpression(BallerinaParser.BinaryOrExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBinaryRefEqualExpression(BallerinaParser.BinaryRefEqualExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBinaryEqualExpression(BallerinaParser.BinaryEqualExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitStaticMatchOrExpression(BallerinaParser.StaticMatchOrExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitStaticMatchIdentifierLiteral(BallerinaParser.StaticMatchIdentifierLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addNameReference(getCurrentPos(ctx), getWS(ctx), null, ctx.Identifier().getText());
this.pkgBuilder.createSimpleVariableReference(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitTypeDescExpr(BallerinaParser.TypeDescExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTypeAccessExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitActionInvocation(BallerinaParser.ActionInvocationContext ctx) {
if (isInErrorState) {
return;
}
int numAnnotations = ctx.annotationAttachment().size();
this.pkgBuilder.createActionInvocationNode(getCurrentPos(ctx), getWS(ctx), ctx.START() != null,
numAnnotations);
}
@Override
public void exitBinaryAndExpression(BallerinaParser.BinaryAndExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBinaryAddSubExpression(BallerinaParser.BinaryAddSubExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBitwiseExpression(BallerinaParser.BitwiseExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBitwiseShiftExpression(BallerinaParser.BitwiseShiftExpressionContext ctx) {
if (isInErrorState) {
return;
}
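// Shift operators reach the parser as a sequence of single-character tokens,
// so the operator text is reassembled from all the middle children before
// building the binary expression.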
StringBuilder operator = new StringBuilder();
for (int i = 1; i < ctx.getChildCount() - 1; i++) {
operator.append(ctx.getChild(i).getText());
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), operator.toString());
}
/**
* {@inheritDoc}
*/
@Override
public void exitTypeConversionExpression(BallerinaParser.TypeConversionExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTypeConversionExpr(getCurrentPos(ctx), getWS(ctx),
ctx.annotationAttachment().size(), ctx.typeName() != null);
}
@Override
public void exitBinaryCompareExpression(BallerinaParser.BinaryCompareExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitIntegerRangeExpression(BallerinaParser.IntegerRangeExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitUnaryExpression(BallerinaParser.UnaryExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createUnaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(0).getText());
}
@Override
public void exitTypeTestExpression(BallerinaParser.TypeTestExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTypeTestExpression(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitAnnotAccessExpression(BallerinaParser.AnnotAccessExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createAnnotAccessNode(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitGroupExpression(BallerinaParser.GroupExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createGroupExpression(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitTernaryExpression(BallerinaParser.TernaryExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTernaryExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitCheckedExpression(BallerinaParser.CheckedExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createCheckedExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitCheckPanickedExpression(BallerinaParser.CheckPanickedExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createCheckPanickedExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitFromClause(BallerinaParser.FromClauseContext ctx) {
if (isInErrorState) {
return;
}
boolean isDeclaredWithVar = ctx.VAR() != null;
if (ctx.bindingPattern().Identifier() != null) {
String identifier = ctx.bindingPattern().Identifier().getText();
DiagnosticPos identifierPos = getCurrentPos(ctx.bindingPattern().Identifier());
this.pkgBuilder.createFromClauseWithSimpleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
identifier, identifierPos,
isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().recordBindingPattern() != null) {
this.pkgBuilder.createFromClauseWithRecordVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().errorBindingPattern() != null) {
this.pkgBuilder.createFromClauseWithErrorVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
} else {
this.pkgBuilder.createFromClauseWithTupleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
}
}
@Override
public void exitWhereClause(BallerinaParser.WhereClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createWhereClause(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitSelectClause(BallerinaParser.SelectClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createSelectClause(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitDoClause(BallerinaParser.DoClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createDoClause(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitQueryExpr(BallerinaParser.QueryExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createQueryExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterQueryActionStatement(BallerinaParser.QueryActionStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startDoActionStatement();
}
@Override
public void exitQueryActionStatement(BallerinaParser.QueryActionStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createQueryActionStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitNameReference(BallerinaParser.NameReferenceContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier().size() == 2) {
String pkgName = ctx.Identifier(0).getText();
String name = ctx.Identifier(1).getText();
DiagnosticPos pos = getCurrentPos(ctx);
if (Names.IGNORE.value.equals(pkgName)) {
dlog.error(pos, DiagnosticCode.INVALID_PACKAGE_NAME_QUALIFER, pkgName);
}
this.pkgBuilder.addNameReference(pos, getWS(ctx), pkgName, name);
} else {
String name = ctx.Identifier(0).getText();
this.pkgBuilder.addNameReference(getCurrentPos(ctx), getWS(ctx), null, name);
}
}
@Override
public void exitFunctionNameReference(BallerinaParser.FunctionNameReferenceContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier() != null) {
String pkgName = ctx.Identifier().getText();
String name = ctx.anyIdentifierName().getText();
DiagnosticPos pos = getCurrentPos(ctx);
if (Names.IGNORE.value.equals(pkgName)) {
dlog.error(pos, DiagnosticCode.INVALID_PACKAGE_NAME_QUALIFER, pkgName);
}
this.pkgBuilder.addNameReference(pos, getWS(ctx), pkgName, name);
} else {
String name = ctx.anyIdentifierName().getText();
this.pkgBuilder.addNameReference(getCurrentPos(ctx), getWS(ctx), null, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitReturnParameter(BallerinaParser.ReturnParameterContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addReturnParam(getCurrentPos(ctx), getWS(ctx), ctx.annotationAttachment().size());
}
@Override
public void enterParameterTypeNameList(BallerinaParser.ParameterTypeNameListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startVarList();
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterTypeNameList(BallerinaParser.ParameterTypeNameListContext ctx) {
if (isInErrorState) {
return;
}
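// A parameter type list may belong to a function type descriptor, either
// directly or through its return parameter; that case is closed with the
// function-type builder call, all others as an ordinary callable list.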
ParserRuleContext parent = ctx.getParent();
boolean inFuncTypeSig = parent instanceof BallerinaParser.FunctionTypeNameContext ||
parent instanceof BallerinaParser.ReturnParameterContext &&
parent.parent instanceof BallerinaParser.FunctionTypeNameContext;
if (inFuncTypeSig) {
this.pkgBuilder.endFuncTypeParamList(getWS(ctx));
} else {
this.pkgBuilder.endCallableParamList(getWS(ctx));
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterList(BallerinaParser.ParameterListContext ctx) {
if (isInErrorState) {
return;
}
ParserRuleContext parent = ctx.getParent();
boolean inFuncTypeSig = parent instanceof BallerinaParser.FunctionTypeNameContext ||
parent instanceof BallerinaParser.ReturnParameterContext &&
parent.parent instanceof BallerinaParser.FunctionTypeNameContext;
if (inFuncTypeSig) {
this.pkgBuilder.endFuncTypeParamList(getWS(ctx));
} else {
this.pkgBuilder.endCallableParamList(getWS(ctx));
}
}
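/**
 * Left-pads a hexadecimal code point string with zeros up to four digits,
 * e.g. "5A" becomes "005A", so it can be emitted as a `\u` escape.
 */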
private String fillWithZeros(String str) {
while (str.length() < 4) {
str = "0".concat(str);
}
return str;
}
/**
* {@inheritDoc}
*/
@Override
public void exitSimpleLiteral(BallerinaParser.SimpleLiteralContext ctx) {
if (isInErrorState) {
return;
}
TerminalNode node;
DiagnosticPos pos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
Object value;
BallerinaParser.IntegerLiteralContext integerLiteralContext = ctx.integerLiteral();
if (integerLiteralContext != null && (value = getIntegerLiteral(ctx, ctx.integerLiteral())) != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.INT, value, ctx.getText());
} else if (ctx.floatingPointLiteral() != null) {
if ((node = ctx.floatingPointLiteral().DecimalFloatingPointNumber()) != null) {
String nodeValue = getNodeValue(ctx, node);
int literalTypeTag = NumericLiteralSupport.isDecimalDiscriminated(nodeValue)
? TypeTags.DECIMAL : TypeTags.FLOAT;
this.pkgBuilder.addLiteralValue(pos, ws, literalTypeTag, nodeValue, node.getText());
} else if ((node = ctx.floatingPointLiteral().HexadecimalFloatingPointLiteral()) != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.FLOAT, getHexNodeValue(ctx, node), node.getText());
}
} else if ((node = ctx.BooleanLiteral()) != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.BOOLEAN, Boolean.parseBoolean(node.getText()),
node.getText());
} else if ((node = ctx.QuotedStringLiteral()) != null) {
String text = node.getText();
text = text.substring(1, text.length() - 1);
String originalText = text;
Matcher matcher = pattern.matcher(text);
int position = 0;
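// Rewrite each `\u{...}` escape into a Java-style `\uXXXX` escape, rejecting
// code points in the ranges disallowed by Constants (presumably the surrogate
// block and values beyond the unicode maximum) before the literal is
// unescaped below.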
while (matcher.find(position)) {
String hexStringVal = matcher.group(1);
int hexDecimalVal = Integer.parseInt(hexStringVal, 16);
if ((hexDecimalVal >= Constants.MIN_UNICODE && hexDecimalVal <= Constants.MIDDLE_LIMIT_UNICODE)
|| hexDecimalVal > Constants.MAX_UNICODE) {
String hexStringWithBraces = matcher.group(0);
int offset = originalText.indexOf(hexStringWithBraces) + 1;
dlog.error(new DiagnosticPos(diagnosticSrc, pos.sLine, pos.eLine, pos.sCol + offset,
pos.sCol + offset + hexStringWithBraces.length()),
DiagnosticCode.INVALID_UNICODE, hexStringWithBraces);
}
text = matcher.replaceFirst("\\\\u" + fillWithZeros(hexStringVal));
position = matcher.end() - 2;
matcher = pattern.matcher(text);
}
text = StringEscapeUtils.unescapeJava(text);
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.STRING, text, node.getText());
} else if (ctx.NullLiteral() != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.NIL, null, "null");
} else if (ctx.nilLiteral() != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.NIL, null, "()");
} else if (ctx.blobLiteral() != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.BYTE_ARRAY, ctx.blobLiteral().getText());
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitNamedArgs(BallerinaParser.NamedArgsContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addNamedArgument(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText());
}
/**
* {@inheritDoc}
*/
@Override
public void exitRestArgs(BallerinaParser.RestArgsContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRestArgument(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlLiteral(BallerinaParser.XmlLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.attachXmlLiteralWS(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitComment(BallerinaParser.CommentContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> stringFragments = getTemplateTextFragments(ctx.XMLCommentTemplateText());
String endingString = getTemplateEndingStr(ctx.XMLCommentText());
this.pkgBuilder.createXMLCommentLiteral(getCurrentPos(ctx), getWS(ctx), stringFragments, endingString);
if (ctx.getParent() instanceof BallerinaParser.ContentContext) {
this.pkgBuilder.addChildToXMLElement(getWS(ctx));
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitElement(BallerinaParser.ElementContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.getParent() instanceof BallerinaParser.ContentContext) {
this.pkgBuilder.addChildToXMLElement(getWS(ctx));
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitStartTag(BallerinaParser.StartTagContext ctx) {
if (isInErrorState) {
return;
}
boolean isRoot = ctx.parent.parent instanceof BallerinaParser.XmlItemContext;
this.pkgBuilder.startXMLElement(getCurrentPos(ctx), getWS(ctx), isRoot);
}
/**
* {@inheritDoc}
*/
@Override
public void exitCloseTag(BallerinaParser.CloseTagContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endXMLElement(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitEmptyTag(BallerinaParser.EmptyTagContext ctx) {
if (isInErrorState) {
return;
}
boolean isRoot = ctx.parent.parent instanceof BallerinaParser.XmlItemContext;
this.pkgBuilder.startXMLElement(getCurrentPos(ctx), getWS(ctx), isRoot);
}
/**
* {@inheritDoc}
*/
@Override
public void exitProcIns(BallerinaParser.ProcInsContext ctx) {
if (isInErrorState) {
return;
}
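// XML_TAG_SPECIAL_OPEN carries the full `<?target` token, so strip the
// leading `<?` and the final character to recover the target name, and drop
// the trailing `?>` from the ending text.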
String targetQName = ctx.XML_TAG_SPECIAL_OPEN().getText();
targetQName = targetQName.substring(2, targetQName.length() - 1);
Stack<String> textFragments = getTemplateTextFragments(ctx.XMLPITemplateText());
String endingText = getTemplateEndingStr(ctx.XMLPIText());
endingText = endingText.substring(0, endingText.length() - 2);
this.pkgBuilder.createXMLPILiteral(getCurrentPos(ctx), getWS(ctx), targetQName, textFragments, endingText);
if (ctx.getParent() instanceof BallerinaParser.ContentContext) {
this.pkgBuilder.addChildToXMLElement(getWS(ctx));
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitAttribute(BallerinaParser.AttributeContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createXMLAttribute(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitText(BallerinaParser.TextContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> textFragments = getTemplateTextFragments(ctx.XMLTemplateText());
String endingText = getTemplateEndingStr(ctx.XMLText());
if (ctx.getParent() instanceof BallerinaParser.ContentContext) {
this.pkgBuilder.addXMLTextToElement(getCurrentPos(ctx), getWS(ctx), textFragments, endingText);
} else {
this.pkgBuilder.createXMLTextLiteral(getCurrentPos(ctx), getWS(ctx), textFragments, endingText);
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlSingleQuotedString(BallerinaParser.XmlSingleQuotedStringContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> stringFragments = getTemplateTextFragments(ctx.XMLSingleQuotedTemplateString());
String endingString = getTemplateEndingStr(ctx.XMLSingleQuotedString());
this.pkgBuilder.createXMLQuotedLiteral(getCurrentPos(ctx), getWS(ctx), stringFragments, endingString,
QuoteType.SINGLE_QUOTE);
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlDoubleQuotedString(BallerinaParser.XmlDoubleQuotedStringContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> stringFragments = getTemplateTextFragments(ctx.XMLDoubleQuotedTemplateString());
String endingString = getTemplateEndingStr(ctx.XMLDoubleQuotedString());
this.pkgBuilder.createXMLQuotedLiteral(getCurrentPos(ctx), getWS(ctx), stringFragments, endingString,
QuoteType.DOUBLE_QUOTE);
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlQualifiedName(BallerinaParser.XmlQualifiedNameContext ctx) {
if (isInErrorState) {
return;
}
List<TerminalNode> qnames = ctx.XMLQName();
String prefix = null;
String localname;
if (qnames.size() > 1) {
prefix = qnames.get(0).getText();
localname = qnames.get(1).getText();
} else {
localname = qnames.get(0).getText();
}
this.pkgBuilder.createXMLQName(getCurrentPos(ctx), getWS(ctx), localname, prefix);
}
/**
* {@inheritDoc}
*/
@Override
public void exitStringTemplateLiteral(BallerinaParser.StringTemplateLiteralContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> stringFragments;
String endingText = null;
StringTemplateContentContext contentContext = ctx.stringTemplateContent();
if (contentContext != null) {
stringFragments = getTemplateTextFragments(contentContext.StringTemplateExpressionStart());
endingText = getTemplateEndingStr(contentContext.StringTemplateText());
} else {
stringFragments = new Stack<>();
}
this.pkgBuilder.createStringTemplateLiteral(getCurrentPos(ctx), getWS(ctx), stringFragments, endingText);
}
/**
* {@inheritDoc}
*/
@Override
public void enterDocumentationString(BallerinaParser.DocumentationStringContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startMarkdownDocumentationString(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitDocumentationString(BallerinaParser.DocumentationStringContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endMarkdownDocumentationString(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitDocumentationLine(BallerinaParser.DocumentationLineContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endMarkDownDocumentLine(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitDocumentationContent(BallerinaParser.DocumentationContentContext ctx) {
if (isInErrorState) {
return;
}
String text = ctx.getText() != null ? ctx.getText() : "";
this.pkgBuilder.endMarkdownDocumentationText(getCurrentPos(ctx), getWS(ctx), text);
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterDocumentationLine(BallerinaParser.ParameterDocumentationLineContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endParameterDocumentationLine(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterDocumentation(BallerinaParser.ParameterDocumentationContext ctx) {
if (isInErrorState) {
return;
}
String parameterName = ctx.docParameterName() != null ? ctx.docParameterName().getText() : "";
String description = ctx.documentationText() != null ? ctx.documentationText().getText() : "";
this.pkgBuilder.endParameterDocumentation(getCurrentPos(ctx.docParameterName()), getWS(ctx), parameterName,
description);
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterDescriptionLine(BallerinaParser.ParameterDescriptionLineContext ctx) {
if (isInErrorState) {
return;
}
String description = ctx.documentationText() != null ? ctx.documentationText().getText() : "";
this.pkgBuilder.endParameterDocumentationDescription(getWS(ctx), description);
}
/**
* {@inheritDoc}
*/
@Override
public void exitReturnParameterDocumentation(BallerinaParser.ReturnParameterDocumentationContext ctx) {
if (isInErrorState) {
return;
}
String description = ctx.documentationText() != null ? ctx.documentationText().getText() : "";
this.pkgBuilder.endReturnParameterDocumentation(getCurrentPos(ctx.getParent()), getWS(ctx), description);
}
/**
* {@inheritDoc}
*/
@Override
public void exitReturnParameterDescriptionLine(BallerinaParser.ReturnParameterDescriptionLineContext ctx) {
if (isInErrorState) {
return;
}
String description = ctx.documentationText() != null ? ctx.documentationText().getText() : "";
this.pkgBuilder.endReturnParameterDocumentationDescription(getWS(ctx), description);
}
@Override
public void exitTrapExpression(BallerinaParser.TrapExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTrapExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitVariableReferenceExpression(BallerinaParser.VariableReferenceExpressionContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.START() != null) {
int numAnnotations = ctx.annotationAttachment().size();
this.pkgBuilder.markLastInvocationAsAsync(getCurrentPos(ctx), numAnnotations);
}
}
@Override
public void exitDocumentationReference(BallerinaParser.DocumentationReferenceContext ctx) {
if (isInErrorState) {
return;
}
BallerinaParser.ReferenceTypeContext referenceType = ctx.referenceType();
BallerinaParser.SingleBacktickedContentContext backtickedContent = ctx.singleBacktickedContent();
this.pkgBuilder.endDocumentationReference(getCurrentPos(ctx), referenceType.getText(),
backtickedContent.getText());
}
@Override
public void exitSingleBacktickedBlock(BallerinaParser.SingleBacktickedBlockContext ctx) {
if (isInErrorState) {
return;
}
BallerinaParser.SingleBacktickedContentContext backtickedContent = ctx.singleBacktickedContent();
this.pkgBuilder.endSingleBacktickedBlock(getCurrentPos(ctx), backtickedContent.getText());
}
/**
* {@inheritDoc}
*/
@Override
public void exitElvisExpression(BallerinaParser.ElvisExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createElvisExpr(getCurrentPos(ctx), getWS(ctx));
}
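/**
 * Computes the diagnostic position of a parser rule context from its start and stop tokens.
 * Columns are converted to 1-based indices; if the stop token is missing, the end
 * line and column are left as -1.
 */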
private DiagnosticPos getCurrentPos(ParserRuleContext ctx) {
int startLine = ctx.getStart().getLine();
int startCol = ctx.getStart().getCharPositionInLine() + 1;
int endLine = -1;
int endCol = -1;
Token stop = ctx.getStop();
if (stop != null) {
endLine = stop.getLine();
endCol = stop.getCharPositionInLine() + (stop.getStopIndex() - stop.getStartIndex() + 1) + 1;
}
return new DiagnosticPos(diagnosticSrc, startLine, endLine, startCol, endCol);
}
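/**
 * Computes the diagnostic position of a single terminal node. A terminal token never
 * spans lines, so the end line equals the start line and the end column is derived
 * from the token text length.
 */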
private DiagnosticPos getCurrentPos(TerminalNode node) {
Token symbol = node.getSymbol();
int startLine = symbol.getLine();
int startCol = symbol.getCharPositionInLine() + 1;
int endLine = startLine;
int endCol = startCol + symbol.getText().length();
return new DiagnosticPos(diagnosticSrc, startLine, endLine, startCol, endCol);
}
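/**
 * Returns the whitespace captured for the given context. This base implementation
 * intentionally returns null; a whitespace-preserving subclass (not shown here)
 * presumably overrides it to supply the actual trivia.
 */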
protected Set<Whitespace> getWS(ParserRuleContext ctx) {
return null;
}
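/**
 * Collects the literal text fragments of a template expression. Each fragment token
 * ends with a two-character interpolation-start marker (e.g. "${"), which is stripped
 * before the fragment is pushed onto the stack.
 */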
private Stack<String> getTemplateTextFragments(List<TerminalNode> nodes) {
Stack<String> templateStrFragments = new Stack<>();
nodes.forEach(node -> {
if (node == null) {
templateStrFragments.push(null);
} else {
String str = node.getText();
templateStrFragments.push(str.substring(0, str.length() - 2));
}
});
return templateStrFragments;
}
private String getTemplateEndingStr(TerminalNode node) {
return node == null ? null : node.getText();
}
private String getTemplateEndingStr(List<TerminalNode> nodes) {
StringJoiner joiner = new StringJoiner("");
nodes.forEach(node -> joiner.add(node.getText()));
return joiner.toString();
}
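/**
 * Returns the text of the given terminal node, prefixing a '-' when the enclosing
 * literal context starts with a unary minus.
 */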
private String getNodeValue(ParserRuleContext ctx, TerminalNode node) {
String op = ctx.getChild(0).getText();
String value = node.getText();
if ("-".equals(op)) {
value = "-" + value;
}
return value;
}
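/**
 * Returns the hex literal text, appending a neutral binary exponent ("p0") when none
 * is present so that hexadecimal floating-point values can be parsed uniformly later.
 */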
private String getHexNodeValue(ParserRuleContext ctx, TerminalNode node) {
String value = getNodeValue(ctx, node);
if (!(value.contains("p") || value.contains("P"))) {
value = value + "p0";
}
return value;
}
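/**
 * Parses an integer literal, dispatching on its form: decimal literals are parsed with
 * radix 10, while hex literals have their "0x" prefix stripped and are parsed with
 * radix 16. Returns null for any other form.
 */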
private Object getIntegerLiteral(ParserRuleContext simpleLiteralContext,
BallerinaParser.IntegerLiteralContext integerLiteralContext) {
if (integerLiteralContext.DecimalIntegerLiteral() != null) {
String nodeValue = getNodeValue(simpleLiteralContext, integerLiteralContext.DecimalIntegerLiteral());
return parseLong(simpleLiteralContext, nodeValue, nodeValue, 10, DiagnosticCode.INTEGER_TOO_SMALL,
DiagnosticCode.INTEGER_TOO_LARGE);
} else if (integerLiteralContext.HexIntegerLiteral() != null) {
String nodeValue = getNodeValue(simpleLiteralContext, integerLiteralContext.HexIntegerLiteral());
String processedNodeValue = nodeValue.toLowerCase().replace("0x", "");
return parseLong(simpleLiteralContext, nodeValue, processedNodeValue, 16,
DiagnosticCode.HEXADECIMAL_TOO_SMALL, DiagnosticCode.HEXADECIMAL_TOO_LARGE);
}
return null;
}
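/**
 * Parses a long in the given radix. On failure (typically overflow) it logs the
 * too-small diagnostic for negative literals or the too-large diagnostic otherwise,
 * and returns the original literal text instead of a number.
 */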
private Object parseLong(ParserRuleContext context, String originalNodeValue, String processedNodeValue, int radix,
DiagnosticCode code1, DiagnosticCode code2) {
try {
return Long.parseLong(processedNodeValue, radix);
} catch (Exception e) {
DiagnosticPos pos = getCurrentPos(context);
if (originalNodeValue.startsWith("-")) {
dlog.error(pos, code1, originalNodeValue);
} else {
dlog.error(pos, code2, originalNodeValue);
}
}
return originalNodeValue;
}
/**
* Mark that this listener is in error state.
*/
public void setErrorState() {
this.isInErrorState = true;
}
/**
* Mark that this listener is not in an error state.
*/
public void unsetErrorState() {
this.isInErrorState = false;
}
boolean isInErrorState() {
return this.isInErrorState;
}
}
|
class BLangParserListener extends BallerinaParserBaseListener {
private static final String KEYWORD_PUBLIC = "public";
private static final String KEYWORD_KEY = "key";
private BLangPackageBuilder pkgBuilder;
private BDiagnosticSource diagnosticSrc;
private BLangDiagnosticLog dlog;
private List<String> pkgNameComps;
private String pkgVersion;
private boolean isInErrorState = false;
private Pattern pattern = Pattern.compile(Constants.UNICODE_REGEX);
BLangParserListener(CompilerContext context, CompilationUnitNode compUnit, BDiagnosticSource diagnosticSource) {
this.pkgBuilder = new BLangPackageBuilder(context, compUnit);
this.diagnosticSrc = diagnosticSource;
this.dlog = BLangDiagnosticLog.getInstance(context);
}
@Override
public void enterParameterList(BallerinaParser.ParameterListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startVarList();
}
@Override
public void exitParameter(BallerinaParser.ParameterContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addSimpleVar(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), false,
ctx.annotationAttachment().size(), ctx.PUBLIC() != null);
}
/**
* {@inheritDoc}
*/
@Override
public void enterFormalParameterList(BallerinaParser.FormalParameterListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startVarList();
}
/**
* {@inheritDoc}
*/
@Override
public void exitFormalParameterList(BallerinaParser.FormalParameterListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endFormalParameterList(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitDefaultableParameter(BallerinaParser.DefaultableParameterContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addDefaultableParam(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitRestParameter(BallerinaParser.RestParameterContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRestParam(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), ctx.annotationAttachment().size());
}
@Override
public void exitRestParameterTypeName(BallerinaParser.RestParameterTypeNameContext ctx) {
if (isInErrorState) {
return;
}
pkgBuilder.addRestParam(getCurrentPos(ctx), getWS(ctx), null, null, 0);
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterTypeName(BallerinaParser.ParameterTypeNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addSimpleVar(getCurrentPos(ctx), getWS(ctx), null, null, false, 0);
}
@Override
public void enterCompilationUnit(BallerinaParser.CompilationUnitContext ctx) {
}
/**
* {@inheritDoc}
*/
@Override
public void exitCompilationUnit(BallerinaParser.CompilationUnitContext ctx) {
this.pkgBuilder.endCompilationUnit(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitPackageName(BallerinaParser.PackageNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgNameComps = new ArrayList<>();
ctx.Identifier().forEach(e -> pkgNameComps.add(e.getText()));
this.pkgVersion = ctx.version() != null ? ctx.version().versionPattern().getText() : null;
}
/**
* {@inheritDoc}
*/
@Override
public void exitImportDeclaration(BallerinaParser.ImportDeclarationContext ctx) {
if (isInErrorState) {
return;
}
String alias = ctx.Identifier() != null ? ctx.Identifier().getText() : null;
BallerinaParser.OrgNameContext orgNameContext = ctx.orgName();
if (orgNameContext == null) {
this.pkgBuilder.addImportPackageDeclaration(getCurrentPos(ctx), getWS(ctx),
null, this.pkgNameComps, this.pkgVersion, alias);
} else {
this.pkgBuilder.addImportPackageDeclaration(getCurrentPos(ctx), getWS(ctx),
orgNameContext.getText(), this.pkgNameComps, this.pkgVersion, alias);
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitServiceDefinition(BallerinaParser.ServiceDefinitionContext ctx) {
if (isInErrorState) {
return;
}
final DiagnosticPos serviceDefPos = getCurrentPos(ctx);
final String serviceVarName = ctx.Identifier() != null ? ctx.Identifier().getText() : null;
final DiagnosticPos varPos =
ctx.Identifier() != null ? getCurrentPos(ctx.Identifier()) : serviceDefPos;
this.pkgBuilder.endServiceDef(serviceDefPos, getWS(ctx), serviceVarName, varPos, false);
}
/**
* {@inheritDoc}
*/
@Override
public void enterServiceBody(BallerinaParser.ServiceBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startServiceDef(getCurrentPos(ctx));
this.pkgBuilder.startObjectType();
}
/**
* {@inheritDoc}
*/
@Override
public void exitServiceBody(BallerinaParser.ServiceBodyContext ctx) {
if (isInErrorState) {
return;
}
boolean isFieldAnalyseRequired = (ctx.parent.parent instanceof BallerinaParser.GlobalVariableDefinitionContext
|| ctx.parent.parent instanceof BallerinaParser.ReturnParameterContext)
|| ctx.parent.parent.parent.parent instanceof BallerinaParser.TypeDefinitionContext;
this.pkgBuilder
.addObjectType(getCurrentPos(ctx), getWS(ctx), isFieldAnalyseRequired, false, false, false, true);
}
/**
* {@inheritDoc}
*/
@Override
public void enterBlockFunctionBody(BallerinaParser.BlockFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startBlockFunctionBody();
}
/**
* {@inheritDoc}
*/
@Override
public void exitBlockFunctionBody(BallerinaParser.BlockFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endBlockFunctionBody(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterExprFunctionBody(BallerinaParser.ExprFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExprFunctionBody();
}
/**
* {@inheritDoc}
*/
@Override
public void exitExprFunctionBody(BallerinaParser.ExprFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExprFunctionBody(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterExternalFunctionBody(BallerinaParser.ExternalFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExternFunctionBody();
}
/**
* {@inheritDoc}
*/
@Override
public void exitExternalFunctionBody(BallerinaParser.ExternalFunctionBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExternalFunctionBody(ctx.annotationAttachment().size(), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterFunctionDefinition(BallerinaParser.FunctionDefinitionContext ctx) {
if (isInErrorState) {
return;
}
int annotCount = ((BallerinaParser.CompilationUnitContext) ctx.parent.parent).annotationAttachment().size();
this.pkgBuilder.startFunctionDef(annotCount, false);
}
/**
* {@inheritDoc}
*/
@Override
public void exitFunctionDefinition(BallerinaParser.FunctionDefinitionContext ctx) {
if (isInErrorState) {
return;
}
String funcName = ctx.anyIdentifierName().getText();
boolean publicFunc = ctx.PUBLIC() != null;
boolean privateFunc = ctx.PRIVATE() != null;
boolean remoteFunc = ctx.REMOTE() != null;
boolean nativeFunc = ctx.functionDefinitionBody().externalFunctionBody() != null;
this.pkgBuilder.endFunctionDefinition(getCurrentPos(ctx), getWS(ctx), funcName,
getCurrentPos(ctx.anyIdentifierName()), publicFunc, remoteFunc,
nativeFunc, privateFunc, false);
}
@Override
public void enterExplicitAnonymousFunctionExpr(BallerinaParser.ExplicitAnonymousFunctionExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startLambdaFunctionDef(diagnosticSrc.pkgID);
}
@Override
public void exitExplicitAnonymousFunctionExpr(BallerinaParser.ExplicitAnonymousFunctionExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addLambdaFunctionDef(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterInferAnonymousFunctionExpr(BallerinaParser.InferAnonymousFunctionExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startVarList();
}
@Override
public void exitInferAnonymousFunctionExpression(BallerinaParser.InferAnonymousFunctionExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addArrowFunctionDef(getCurrentPos(ctx), getWS(ctx), diagnosticSrc.pkgID);
}
@Override
public void exitInferParamList(BallerinaParser.InferParamListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addWSForInferParamList(getWS(ctx));
}
@Override
public void exitInferParam(BallerinaParser.InferParamContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addVarWithoutType(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), false, 0);
}
/**
* {@inheritDoc}
*/
@Override
public void exitFunctionSignature(BallerinaParser.FunctionSignatureContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endFunctionSignature(getCurrentPos(ctx), getWS(ctx), ctx.formalParameterList() != null,
ctx.returnParameter() != null, ctx.formalParameterList() != null
&& ctx.formalParameterList().restParameter() != null);
}
/**
* {@inheritDoc}
*/
@Override
public void exitFiniteType(BallerinaParser.FiniteTypeContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endFiniteType(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitTypeDefinition(BallerinaParser.TypeDefinitionContext ctx) {
if (isInErrorState) {
return;
}
boolean publicObject = ctx.PUBLIC() != null;
this.pkgBuilder.endTypeDefinition(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), publicObject);
}
/**
* {@inheritDoc}
*/
@Override
public void enterObjectBody(BallerinaParser.ObjectBodyContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startObjectType();
}
/**
* {@inheritDoc}
*/
@Override
public void exitObjectBody(BallerinaParser.ObjectBodyContext ctx) {
if (isInErrorState) {
return;
}
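// The object is anonymous unless its body hangs directly off a named finite type unit;
// a finite type with more than one member also forces the object to be anonymous.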
boolean isAnonymous = !(ctx.parent.parent instanceof BallerinaParser.FiniteTypeUnitContext)
|| (ctx.parent.parent instanceof BallerinaParser.FiniteTypeUnitContext
&& ctx.parent.parent.parent instanceof BallerinaParser.FiniteTypeContext
&& ctx.parent.parent.parent.getChildCount() > 1);
boolean isFieldAnalyseRequired =
(ctx.parent.parent instanceof BallerinaParser.GlobalVariableDefinitionContext ||
ctx.parent.parent instanceof BallerinaParser.ReturnParameterContext) ||
ctx.parent.parent.parent.parent instanceof BallerinaParser.TypeDefinitionContext;
boolean isAbstract = ((ObjectTypeNameLabelContext) ctx.parent).ABSTRACT() != null;
boolean isClient = ((ObjectTypeNameLabelContext) ctx.parent).CLIENT() != null;
this.pkgBuilder.addObjectType(getCurrentPos(ctx), getWS(ctx), isFieldAnalyseRequired, isAnonymous, isAbstract,
isClient, false);
}
/**
* {@inheritDoc}
*/
@Override
public void exitObjectTypeNameLabel(BallerinaParser.ObjectTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addObjectTypeName(getWS(ctx));
}
@Override
public void exitTypeReference(BallerinaParser.TypeReferenceContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTypeReference(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitFieldDefinition(BallerinaParser.FieldDefinitionContext ctx) {
if (isInErrorState) {
return;
}
DiagnosticPos currentPos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
String name = ctx.Identifier().getText();
DiagnosticPos identifierPos = getCurrentPos(ctx.Identifier());
boolean exprAvailable = ctx.expression() != null;
boolean isOptional = ctx.QUESTION_MARK() != null;
this.pkgBuilder.addFieldVariable(currentPos, ws, name, identifierPos, exprAvailable,
ctx.annotationAttachment().size(), false, isOptional);
}
/**
* {@inheritDoc}
*/
@Override
public void exitObjectFieldDefinition(BallerinaParser.ObjectFieldDefinitionContext ctx) {
if (isInErrorState) {
return;
}
DiagnosticPos currentPos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
String name = ctx.Identifier().getText();
DiagnosticPos identifierPos = getCurrentPos(ctx.Identifier());
boolean exprAvailable = ctx.expression() != null;
int annotationCount = ctx.annotationAttachment().size();
boolean isPrivate = ctx.PRIVATE() != null;
boolean isPublic = ctx.PUBLIC() != null;
this.pkgBuilder.addObjectFieldVariable(currentPos, ws, name, identifierPos, exprAvailable, annotationCount,
isPrivate, isPublic);
}
/**
* {@inheritDoc}
*/
@Override
public void enterMethodDeclaration(BallerinaParser.MethodDeclarationContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startObjectFunctionDef();
}
/**
* {@inheritDoc}
*/
@Override
public void enterMethodDefinition(BallerinaParser.MethodDefinitionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startObjectFunctionDef();
}
/**
* {@inheritDoc}
*/
@Override
public void exitMethodDefinition(BallerinaParser.MethodDefinitionContext ctx) {
if (isInErrorState) {
return;
}
String funcName = ctx.anyIdentifierName().getText();
DiagnosticPos funcNamePos = getCurrentPos(ctx.anyIdentifierName());
boolean publicFunc = ctx.PUBLIC() != null;
boolean isPrivate = ctx.PRIVATE() != null;
boolean remoteFunc = ctx.REMOTE() != null;
boolean resourceFunc = ctx.RESOURCE() != null;
boolean markdownDocExists = ctx.documentationString() != null;
this.pkgBuilder.endObjectAttachedFunctionDef(getCurrentPos(ctx), getWS(ctx), funcName, funcNamePos, publicFunc,
isPrivate, remoteFunc, resourceFunc, false, markdownDocExists,
ctx.annotationAttachment().size());
}
/**
* {@inheritDoc}
*/
@Override
public void exitMethodDeclaration(BallerinaParser.MethodDeclarationContext ctx) {
if (isInErrorState) {
return;
}
String funcName = ctx.anyIdentifierName().getText();
DiagnosticPos funcNamePos = getCurrentPos(ctx.anyIdentifierName());
boolean isPublic = ctx.PUBLIC() != null;
boolean isPrivate = ctx.PRIVATE() != null;
boolean remoteFunc = ctx.REMOTE() != null;
boolean resourceFunc = ctx.RESOURCE() != null;
boolean markdownDocExists = ctx.documentationString() != null;
this.pkgBuilder.endObjectAttachedFunctionDef(getCurrentPos(ctx), getWS(ctx), funcName, funcNamePos, isPublic,
isPrivate, remoteFunc, resourceFunc, true, markdownDocExists,
ctx.annotationAttachment().size());
}
/**
* {@inheritDoc}
*/
@Override
public void enterAnnotationDefinition(BallerinaParser.AnnotationDefinitionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startAnnotationDef(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitAnnotationDefinition(BallerinaParser.AnnotationDefinitionContext ctx) {
if (isInErrorState) {
return;
}
boolean publicAnnotation = KEYWORD_PUBLIC.equals(ctx.getChild(0).getText());
boolean isTypeAttached = ctx.typeName() != null;
boolean isConst = ctx.CONST() != null;
this.pkgBuilder.endAnnotationDef(getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), publicAnnotation, isTypeAttached, isConst);
}
/**
* {@inheritDoc}
*/
@Override
public void exitConstantDefinition(BallerinaParser.ConstantDefinitionContext ctx) {
if (isInErrorState) {
return;
}
boolean isPublic = ctx.PUBLIC() != null;
boolean isTypeAvailable = ctx.typeName() != null;
this.pkgBuilder.addConstant(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), isPublic, isTypeAvailable);
}
@Override
public void exitConstDivMulModExpression(BallerinaParser.ConstDivMulModExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitConstAddSubExpression(BallerinaParser.ConstAddSubExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitConstGroupExpression(BallerinaParser.ConstGroupExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createGroupExpression(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitGlobalVariableDefinition(BallerinaParser.GlobalVariableDefinitionContext ctx) {
if (isInErrorState) {
return;
}
boolean isPublic = ctx.PUBLIC() != null;
boolean isFinal = ctx.FINAL() != null;
boolean isDeclaredWithVar = ctx.VAR() != null;
boolean isExpressionAvailable = ctx.expression() != null;
boolean isListenerVar = ctx.LISTENER() != null;
boolean isTypeNameProvided = ctx.typeName() != null;
this.pkgBuilder.addGlobalVariable(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()), isPublic, isFinal,
isDeclaredWithVar, isExpressionAvailable, isListenerVar, isTypeNameProvided);
}
@Override
public void exitAttachmentPoint(BallerinaParser.AttachmentPointContext ctx) {
if (isInErrorState) {
return;
}
AttachPoint attachPoint;
if (ctx.dualAttachPoint() != null) {
if (ctx.dualAttachPoint().SOURCE() != null) {
attachPoint = AttachPoint.getAttachmentPoint(ctx.dualAttachPoint().dualAttachPointIdent().getText(),
true);
} else {
attachPoint = AttachPoint.getAttachmentPoint(ctx.getText(), false);
}
} else {
attachPoint = AttachPoint.getAttachmentPoint(
ctx.sourceOnlyAttachPoint().sourceOnlyAttachPointIdent().getText(), true);
}
this.pkgBuilder.addAttachPoint(attachPoint, getWS(ctx));
}
@Override
public void enterWorkerDeclaration(BallerinaParser.WorkerDeclarationContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startWorker(diagnosticSrc.pkgID);
}
@Override
public void exitWorkerDeclaration(BallerinaParser.WorkerDeclarationContext ctx) {
if (isInErrorState) {
return;
}
String workerName = null;
DiagnosticPos workerNamePos = null;
if (ctx.workerDefinition() != null) {
workerName = escapeQuotedIdentifier(ctx.workerDefinition().Identifier().getText());
workerNamePos = getCurrentPos(ctx.workerDefinition().Identifier());
}
// Guard against a missing worker definition, mirroring the null check above.
boolean retParamsAvail = ctx.workerDefinition() != null && ctx.workerDefinition().returnParameter() != null;
int numAnnotations = ctx.annotationAttachment().size();
this.pkgBuilder.addWorker(
getCurrentPos(ctx), getWS(ctx), workerName, workerNamePos, retParamsAvail, numAnnotations);
}
/**
* {@inheritDoc}
*/
@Override
public void exitWorkerDefinition(BallerinaParser.WorkerDefinitionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.attachWorkerWS(getWS(ctx));
}
@Override
public void exitArrayTypeNameLabel(BallerinaParser.ArrayTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
int index = 1;
int dimensions = 0;
List<Integer> sizes = new ArrayList<>();
List<ParseTree> children = ctx.children;
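// Scan the child tokens for '[' groups: "[]" marks an unsealed dimension, "[*]" an
// open-sealed one, and "[N]" a fixed size. Sizes are collected outermost-first and
// reversed below, since the builder apparently expects them innermost-first.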
while (index < children.size()) {
if (children.get(index).getText().equals("[")) {
if (children.get(index + 1).getText().equals("]")) {
sizes.add(UNSEALED_ARRAY_INDICATOR);
index += 2;
} else if (children.get(index + 1).getText().equals(OPEN_SEALED_ARRAY)) {
sizes.add(OPEN_SEALED_ARRAY_INDICATOR);
index += 1;
} else {
sizes.add(Integer.parseInt(children.get(index + 1).getText()));
index += 1;
}
dimensions++;
} else {
index++;
}
}
Collections.reverse(sizes);
this.pkgBuilder.addArrayType(
getCurrentPos(ctx), getWS(ctx), dimensions, sizes.stream().mapToInt(val -> val).toArray());
}
@Override
public void exitUnionTypeNameLabel(BallerinaParser.UnionTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addUnionType(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitTupleTypeNameLabel(BallerinaParser.TupleTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTupleType(getCurrentPos(ctx), getWS(ctx), ctx.tupleTypeDescriptor().typeName().size(),
ctx.tupleTypeDescriptor().tupleRestDescriptor() != null);
}
@Override
public void exitNullableTypeNameLabel(BallerinaParser.NullableTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.markTypeNodeAsNullable(getWS(ctx));
}
@Override
public void exitGroupTypeNameLabel(BallerinaParser.GroupTypeNameLabelContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.markTypeNodeAsGrouped(getWS(ctx));
}
@Override
public void enterInclusiveRecordTypeDescriptor(BallerinaParser.InclusiveRecordTypeDescriptorContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startRecordType();
}
@Override
public void exitInclusiveRecordTypeDescriptor(BallerinaParser.InclusiveRecordTypeDescriptorContext ctx) {
if (isInErrorState) {
return;
}
boolean isAnonymous = !(ctx.parent.parent instanceof BallerinaParser.FiniteTypeUnitContext);
boolean isFieldAnalyseRequired =
(ctx.parent.parent instanceof BallerinaParser.GlobalVariableDefinitionContext ||
ctx.parent.parent instanceof BallerinaParser.ReturnParameterContext) ||
ctx.parent.parent.parent.parent instanceof BallerinaParser.TypeDefinitionContext;
this.pkgBuilder.addRecordType(getCurrentPos(ctx), getWS(ctx), isFieldAnalyseRequired, isAnonymous, false,
false);
}
@Override
public void enterExclusiveRecordTypeDescriptor(BallerinaParser.ExclusiveRecordTypeDescriptorContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startRecordType();
}
@Override
public void exitExclusiveRecordTypeDescriptor(BallerinaParser.ExclusiveRecordTypeDescriptorContext ctx) {
if (isInErrorState) {
return;
}
boolean isAnonymous = !(ctx.parent.parent instanceof BallerinaParser.FiniteTypeUnitContext);
boolean isFieldAnalyseRequired =
(ctx.parent.parent instanceof BallerinaParser.GlobalVariableDefinitionContext ||
ctx.parent.parent instanceof BallerinaParser.ReturnParameterContext) ||
ctx.parent.parent.parent.parent instanceof BallerinaParser.TypeDefinitionContext;
boolean hasRestField = ctx.recordRestFieldDefinition() != null;
this.pkgBuilder.addRecordType(getCurrentPos(ctx), getWS(ctx), isFieldAnalyseRequired, isAnonymous,
hasRestField, true);
}
@Override
public void exitSimpleTypeName(BallerinaParser.SimpleTypeNameContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.referenceTypeName() != null || ctx.valueTypeName() != null) {
return;
}
this.pkgBuilder.addValueType(getCurrentPos(ctx), getWS(ctx), ctx.getChild(0).getText());
}
@Override
public void exitUserDefineTypeName(BallerinaParser.UserDefineTypeNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addUserDefineType(getWS(ctx));
}
@Override
public void exitValueTypeName(BallerinaParser.ValueTypeNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addValueType(getCurrentPos(ctx), getWS(ctx), ctx.getText());
}
@Override
public void exitBuiltInReferenceTypeName(BallerinaParser.BuiltInReferenceTypeNameContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.functionTypeName() != null) {
return;
}
if (ctx.errorTypeName() != null) {
return;
}
String typeName = ctx.getChild(0).getText();
DiagnosticPos pos = getCurrentPos(ctx);
if (ctx.typeName() != null) {
this.pkgBuilder.addConstraintTypeWithTypeName(pos, getWS(ctx), typeName);
} else {
this.pkgBuilder.addBuiltInReferenceType(pos, getWS(ctx), typeName);
}
}
@Override
public void enterErrorTypeName(BallerinaParser.ErrorTypeNameContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startErrorType();
}
@Override
public void exitErrorTypeName(BallerinaParser.ErrorTypeNameContext ctx) {
if (isInErrorState) {
return;
}
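// Walk the parent chain to decide whether this error type appears outside a named
// finite type definition; only such occurrences (with a reason type) are anonymous.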
boolean reasonTypeExists = !ctx.typeName().isEmpty();
boolean detailsTypeExists = ctx.typeName().size() > 1;
boolean isAnonymous = !(ctx.parent.parent.parent.parent.parent.parent
instanceof BallerinaParser.FiniteTypeContext) && reasonTypeExists;
this.pkgBuilder.addErrorType(getCurrentPos(ctx), getWS(ctx), reasonTypeExists, detailsTypeExists, isAnonymous);
}
@Override
public void exitFunctionTypeName(BallerinaParser.FunctionTypeNameContext ctx) {
if (isInErrorState) {
return;
}
boolean paramsAvail = false;
boolean retParamAvail = false;
boolean restParamAvail = false;
if (ctx.parameterList() != null) {
paramsAvail = ctx.parameterList().parameter().size() > 0;
if (ctx.parameterList().restParameter() != null) {
restParamAvail = true;
}
} else if (ctx.parameterTypeNameList() != null) {
paramsAvail = ctx.parameterTypeNameList().parameterTypeName().size() > 0;
if (ctx.parameterTypeNameList().restParameterTypeName() != null) {
restParamAvail = true;
}
}
if (ctx.returnParameter() != null) {
retParamAvail = true;
}
this.pkgBuilder.addFunctionType(getCurrentPos(ctx), getWS(ctx), paramsAvail, restParamAvail, retParamAvail);
}
/**
* {@inheritDoc}
*/
@Override
public void enterAnnotationAttachment(BallerinaParser.AnnotationAttachmentContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startAnnotationAttachment(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitAnnotationAttachment(BallerinaParser.AnnotationAttachmentContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.setAnnotationAttachmentName(getWS(ctx), ctx.recordLiteral() != null,
getCurrentPos(ctx), false);
}
/**
* {@inheritDoc}
*/
@Override
public void exitErrorRestBindingPattern(BallerinaParser.ErrorRestBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addWSForErrorRestBinding(getWS(ctx));
}
@Override
public void exitErrorBindingPattern(BallerinaParser.ErrorBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
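// A type name indicates an indirect error binding pattern: only an optional rest
// binding is recorded. Otherwise the pattern carries a reason identifier plus an
// optional rest identifier.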
if (ctx.typeName() != null) {
if (ctx.errorFieldBindingPatterns().errorRestBindingPattern() != null) {
String restIdName = ctx.errorFieldBindingPatterns().errorRestBindingPattern().Identifier().getText();
DiagnosticPos restPos = getCurrentPos(ctx.errorFieldBindingPatterns().errorRestBindingPattern());
this.pkgBuilder.addErrorVariable(getCurrentPos(ctx), getWS(ctx), restIdName, restPos);
} else {
this.pkgBuilder.addErrorVariable(getCurrentPos(ctx), getWS(ctx), null, null);
}
return;
}
String reasonIdentifier = ctx.Identifier().getText();
DiagnosticPos currentPos = getCurrentPos(ctx);
String restIdentifier = null;
DiagnosticPos restParamPos = null;
if (ctx.errorRestBindingPattern() != null) {
restIdentifier = ctx.errorRestBindingPattern().Identifier().getText();
restParamPos = getCurrentPos(ctx.errorRestBindingPattern());
}
this.pkgBuilder.addErrorVariable(currentPos, getWS(ctx), reasonIdentifier, restIdentifier, false, false,
restParamPos);
}
@Override
public void enterErrorBindingPattern(BallerinaParser.ErrorBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startErrorBindingNode();
}
@Override
public void enterErrorMatchPattern(BallerinaParser.ErrorMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startErrorBindingNode();
}
@Override
public void exitSimpleMatchPattern(BallerinaParser.SimpleMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endSimpleMatchPattern(getWS(ctx));
}
@Override
public void exitRestMatchPattern(BallerinaParser.RestMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addWSForRestMatchPattern(getWS(ctx));
}
@Override
public void exitErrorArgListMatchPattern(BallerinaParser.ErrorArgListMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
String restIdentifier = null;
DiagnosticPos restParamPos = null;
if (ctx.restMatchPattern() != null) {
restIdentifier = ctx.restMatchPattern().Identifier().getText();
restParamPos = getCurrentPos(ctx.restMatchPattern());
}
String reasonIdentifier = null;
boolean reasonVar = false;
boolean constReasonMatchPattern = false;
if (ctx.simpleMatchPattern() != null) {
reasonVar = ctx.simpleMatchPattern().VAR() != null;
if (ctx.simpleMatchPattern().Identifier() != null) {
reasonIdentifier = ctx.simpleMatchPattern().Identifier().getText();
} else {
reasonIdentifier = ctx.simpleMatchPattern().QuotedStringLiteral().getText();
constReasonMatchPattern = true;
}
}
this.pkgBuilder.addErrorVariable(getCurrentPos(ctx), getWS(ctx), reasonIdentifier,
restIdentifier, reasonVar, constReasonMatchPattern, restParamPos);
}
@Override
public void exitErrorMatchPattern(BallerinaParser.ErrorMatchPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean isIndirectErrorMatchPattern = ctx.typeName() != null;
this.pkgBuilder.endErrorMatchPattern(getWS(ctx), isIndirectErrorMatchPattern);
}
@Override
public void exitErrorDetailBindingPattern(BallerinaParser.ErrorDetailBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
String bindingVarName = null;
if (ctx.bindingPattern() != null && ctx.bindingPattern().Identifier() != null) {
bindingVarName = ctx.bindingPattern().Identifier().getText();
}
this.pkgBuilder.addErrorDetailBinding(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
bindingVarName);
}
@Override
public void exitErrorRefBindingPattern(BallerinaParser.ErrorRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
int numNamedArgs = ctx.errorNamedArgRefPattern().size();
boolean reasonRefAvailable = ctx.variableReference() != null;
boolean restPatternAvailable = ctx.errorRefRestPattern() != null;
boolean indirectErrorRefPattern = ctx.typeName() != null;
this.pkgBuilder.addErrorVariableReference(getCurrentPos(ctx), getWS(ctx),
numNamedArgs, reasonRefAvailable, restPatternAvailable, indirectErrorRefPattern);
}
@Override
public void exitErrorNamedArgRefPattern(BallerinaParser.ErrorNamedArgRefPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addNamedArgument(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText());
}
@Override
public void exitListBindingPattern(BallerinaParser.ListBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean restBindingAvailable = ctx.restBindingPattern() != null;
this.pkgBuilder.addTupleVariable(getCurrentPos(ctx), getWS(ctx), ctx.bindingPattern().size(),
restBindingAvailable);
}
@Override
public void exitListRefBindingPattern(BallerinaParser.ListRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean restPatternAvailable = ctx.listRefRestPattern() != null;
this.pkgBuilder.addTupleVariableReference(getCurrentPos(ctx), getWS(ctx), ctx.bindingRefPattern().size(),
restPatternAvailable);
}
@Override
public void enterRecordBindingPattern(BallerinaParser.RecordBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startRecordVariableList();
}
@Override
public void exitRecordBindingPattern(BallerinaParser.RecordBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean hasRestBindingPattern = ctx.entryBindingPattern().restBindingPattern() != null;
this.pkgBuilder.addRecordVariable(getCurrentPos(ctx), getWS(ctx), hasRestBindingPattern);
}
@Override
public void enterRecordRefBindingPattern(BallerinaParser.RecordRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startRecordVariableReferenceList();
}
@Override
public void exitRecordRefBindingPattern(BallerinaParser.RecordRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
boolean hasRestBindingPattern = ctx.entryRefBindingPattern().restRefBindingPattern() != null;
this.pkgBuilder.addRecordVariableReference(getCurrentPos(ctx), getWS(ctx), hasRestBindingPattern);
}
@Override
public void exitBindingPattern(BallerinaParser.BindingPatternContext ctx) {
if (isInErrorState) {
return;
}
if ((ctx.Identifier() != null) && ((ctx.parent instanceof BallerinaParser.ListBindingPatternContext)
|| (ctx.parent instanceof BallerinaParser.FieldBindingPatternContext)
|| (ctx.parent instanceof BallerinaParser.MatchPatternClauseContext))) {
this.pkgBuilder.addBindingPatternMemberVariable(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()));
} else if (ctx.Identifier() != null) {
this.pkgBuilder.addBindingPatternNameWhitespace(getWS(ctx));
}
}
@Override
public void exitFieldBindingPattern(BallerinaParser.FieldBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addFieldBindingMemberVar(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()),
ctx.bindingPattern() != null);
}
@Override
public void exitFieldRefBindingPattern(BallerinaParser.FieldRefBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addFieldRefBindingMemberVar(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
ctx.bindingRefPattern() != null);
}
@Override
public void exitRestBindingPattern(BallerinaParser.RestBindingPatternContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier() != null) {
this.pkgBuilder.addBindingPatternMemberVariable(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()));
}
}
@Override
public void exitVariableDefinitionStatement(BallerinaParser.VariableDefinitionStatementContext ctx) {
if (isInErrorState) {
return;
}
boolean isFinal = ctx.FINAL() != null;
boolean isDeclaredWithVar = ctx.VAR() != null;
boolean isExpressionAvailable = ctx.expression() != null;
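// Dispatch on the shape of the declaration: a plain identifier (possibly inside a
// binding pattern) becomes a simple variable definition, while record/error/list
// binding patterns become the corresponding destructuring definitions.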
if (ctx.Identifier() != null) {
this.pkgBuilder.addSimpleVariableDefStatement(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
getCurrentPos(ctx.Identifier()),
isFinal, isExpressionAvailable, isDeclaredWithVar);
} else if (ctx.bindingPattern().Identifier() != null) {
this.pkgBuilder.addSimpleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
ctx.bindingPattern().Identifier().getText(),
getCurrentPos(ctx.bindingPattern().Identifier()),
isFinal, isExpressionAvailable, isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().recordBindingPattern() != null) {
this.pkgBuilder.addRecordVariableDefStatement(getCurrentPos(ctx), getWS(ctx), isFinal, isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().errorBindingPattern() != null) {
this.pkgBuilder.addErrorVariableDefStatement(getCurrentPos(ctx), getWS(ctx), isFinal, isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().listBindingPattern() != null) {
this.pkgBuilder.addTupleVariableDefStatement(getCurrentPos(ctx), getWS(ctx), isFinal, isDeclaredWithVar);
}
}
@Override
public void enterRecordLiteral(BallerinaParser.RecordLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startMapStructLiteral();
}
@Override
public void exitRecordLiteral(BallerinaParser.RecordLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addMapStructLiteral(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitRecordField(BallerinaParser.RecordFieldContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier() != null) {
DiagnosticPos pos = getCurrentPos(ctx);
this.pkgBuilder.addNameReference(pos, getWS(ctx), null, ctx.Identifier().getText());
this.pkgBuilder.createBLangRecordVarRefNameField(pos, getWS(ctx));
this.pkgBuilder.addIdentifierRecordField();
} else if (ctx.ELLIPSIS() != null) {
this.pkgBuilder.addSpreadOpRecordField(getWS(ctx));
} else {
this.pkgBuilder.addKeyValueRecordField(getWS(ctx), ctx.recordKey().LEFT_BRACKET() != null);
}
}
@Override
public void exitRecordKey(BallerinaParser.RecordKeyContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier() != null) {
DiagnosticPos pos = getCurrentPos(ctx);
this.pkgBuilder.addNameReference(pos, getWS(ctx), null, ctx.Identifier().getText());
this.pkgBuilder.createSimpleVariableReference(pos, getWS(ctx));
} else if (ctx.LEFT_BRACKET() != null) {
this.pkgBuilder.addRecordKeyWS(getWS(ctx));
}
}
@Override
public void enterTableLiteral(BallerinaParser.TableLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startTableLiteral();
}
@Override
public void exitTableColumnDefinition(BallerinaParser.TableColumnDefinitionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endTableColumnDefinition(getWS(ctx));
}
@Override
public void exitTableColumn(BallerinaParser.TableColumnContext ctx) {
if (isInErrorState) {
return;
}
String columnName;
int childCount = ctx.getChildCount();
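// Two children means the column name is prefixed with a keyword; only "key" is
// valid there, and anything else is reported as a diagnostic.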
if (childCount == 2) {
boolean keyColumn = KEYWORD_KEY.equals(ctx.getChild(0).getText());
if (keyColumn) {
columnName = escapeQuotedIdentifier(ctx.getChild(1).getText());
this.pkgBuilder.addTableColumn(columnName, getCurrentPos(ctx), getWS(ctx));
this.pkgBuilder.markPrimaryKeyColumn(columnName);
} else {
DiagnosticPos pos = getCurrentPos(ctx);
dlog.error(pos, DiagnosticCode.TABLE_KEY_EXPECTED);
}
} else {
columnName = escapeQuotedIdentifier(ctx.getChild(0).getText());
this.pkgBuilder.addTableColumn(columnName, getCurrentPos(ctx), getWS(ctx));
}
}
@Override
public void exitTableDataArray(BallerinaParser.TableDataArrayContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endTableDataArray(getWS(ctx));
}
@Override
public void exitTableDataList(BallerinaParser.TableDataListContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.expressionList() != null) {
this.pkgBuilder.endTableDataRow(getWS(ctx));
}
}
@Override
public void exitTableData(BallerinaParser.TableDataContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endTableDataList(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitTableLiteral(BallerinaParser.TableLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTableLiteral(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterStreamConstructorExpr(BallerinaParser.StreamConstructorExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startStreamConstructor(getCurrentPos(ctx), diagnosticSrc.pkgID);
}
@Override
public void exitStreamConstructorExpr(BallerinaParser.StreamConstructorExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endStreamConstructor(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitListConstructorExpr(BallerinaParser.ListConstructorExprContext ctx) {
if (isInErrorState) {
return;
}
boolean argsAvailable = ctx.expressionList() != null;
this.pkgBuilder.addListConstructorExpression(getCurrentPos(ctx), getWS(ctx), argsAvailable);
}
@Override
public void enterLetExpr(BallerinaParser.LetExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startLetVarList();
}
@Override
public void exitLetExpr(BallerinaParser.LetExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addLetExpression(getCurrentPos(ctx));
}
@Override
public void exitLetVarDecl(BallerinaParser.LetVarDeclContext ctx) {
if (isInErrorState) {
return;
}
boolean isDeclaredWithVar = ctx.VAR() != null;
boolean isExpressionAvailable = ctx.expression() != null;
int annotationAttachmentsSize = ctx.annotationAttachment().size();
if (ctx.bindingPattern().Identifier() != null) {
this.pkgBuilder.addSimpleLetVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
ctx.bindingPattern().Identifier().getText(),
getCurrentPos(ctx.bindingPattern().Identifier()),
isExpressionAvailable, isDeclaredWithVar, annotationAttachmentsSize);
} else if (ctx.bindingPattern().structuredBindingPattern().recordBindingPattern() != null) {
this.pkgBuilder.addRecordVariableLetDefStatement(getCurrentPos(ctx), getWS(ctx), isDeclaredWithVar,
annotationAttachmentsSize);
} else if (ctx.bindingPattern().structuredBindingPattern().errorBindingPattern() != null) {
this.pkgBuilder.addErrorVariableLetDefStatement(getCurrentPos(ctx), getWS(ctx), isDeclaredWithVar,
annotationAttachmentsSize);
} else if (ctx.bindingPattern().structuredBindingPattern().listBindingPattern() != null) {
this.pkgBuilder.addTupleVariableLetDefStatement(getCurrentPos(ctx), getWS(ctx), isDeclaredWithVar,
annotationAttachmentsSize);
}
}
@Override
public void exitTypeInitExpr(BallerinaParser.TypeInitExprContext ctx) {
if (isInErrorState) {
return;
}
String initName = ctx.NEW().getText();
boolean typeAvailable = ctx.userDefineTypeName() != null;
boolean argsAvailable = ctx.invocationArgList() != null;
this.pkgBuilder.addTypeInitExpression(getCurrentPos(ctx), getWS(ctx), initName, typeAvailable, argsAvailable);
}
/**
* {@inheritDoc}
*/
@Override
public void exitAssignmentStatement(BallerinaParser.AssignmentStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addAssignmentStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitListDestructuringStatement(BallerinaParser.ListDestructuringStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTupleDestructuringStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitRecordDestructuringStatement(BallerinaParser.RecordDestructuringStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRecordDestructuringStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitErrorDestructuringStatement(BallerinaParser.ErrorDestructuringStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addErrorDestructuringStatement(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitCompoundAssignmentStatement(BallerinaParser.CompoundAssignmentStatementContext ctx) {
if (isInErrorState) {
return;
}
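// Strip the trailing '=' from the compound operator (e.g. "+=" -> "+") so the
// builder receives the plain binary operator.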
String compoundOperatorText = ctx.compoundOperator().getText();
String operator = compoundOperatorText.substring(0, compoundOperatorText.length() - 1);
this.pkgBuilder.addCompoundAssignmentStatement(getCurrentPos(ctx), getWS(ctx), operator);
}
/**
* {@inheritDoc}
*/
@Override
public void exitCompoundOperator(BallerinaParser.CompoundOperatorContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addCompoundOperator(getWS(ctx));
}
@Override
public void enterVariableReferenceList(BallerinaParser.VariableReferenceListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExprNodeList();
}
@Override
public void exitVariableReferenceList(BallerinaParser.VariableReferenceListContext ctx) {
if (isInErrorState) {
return;
}
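// The children alternate between references and commas, so the number of
// expressions is childCount / 2 + 1.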
this.pkgBuilder.endExprNodeList(getWS(ctx), ctx.getChildCount() / 2 + 1);
}
/**
* {@inheritDoc}
*/
@Override
public void enterIfElseStatement(BallerinaParser.IfElseStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startIfElseNode(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitIfElseStatement(BallerinaParser.IfElseStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endIfElseNode(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitIfClause(BallerinaParser.IfClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addIfBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterElseIfClause(BallerinaParser.ElseIfClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startIfElseNode(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitElseIfClause(BallerinaParser.ElseIfClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addElseIfBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterElseClause(BallerinaParser.ElseClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startBlock();
}
/**
* {@inheritDoc}
*/
@Override
public void exitElseClause(BallerinaParser.ElseClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addElseBlock(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterMatchStatement(BallerinaParser.MatchStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createMatchNode(getCurrentPos(ctx));
}
@Override
public void exitMatchStatement(BallerinaParser.MatchStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.completeMatchNode(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterMatchPatternClause(BallerinaParser.MatchPatternClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startMatchStmtPattern();
}
@Override
public void exitMatchPatternClause(BallerinaParser.MatchPatternClauseContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.bindingPattern() != null || ctx.errorMatchPattern() != null) {
boolean isTypeGuardPresent = ctx.IF() != null;
this.pkgBuilder.addMatchStmtStructuredBindingPattern(getCurrentPos(ctx), getWS(ctx), isTypeGuardPresent);
return;
}
this.pkgBuilder.addMatchStmtStaticBindingPattern(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterForeachStatement(BallerinaParser.ForeachStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startForeachStatement();
}
@Override
public void exitForeachStatement(BallerinaParser.ForeachStatementContext ctx) {
if (isInErrorState) {
return;
}
boolean isDeclaredWithVar = ctx.VAR() != null;
if (ctx.bindingPattern().Identifier() != null) {
String identifier = ctx.bindingPattern().Identifier().getText();
DiagnosticPos identifierPos = getCurrentPos(ctx.bindingPattern().Identifier());
this.pkgBuilder.addForeachStatementWithSimpleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
identifier, identifierPos,
isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().recordBindingPattern() != null) {
this.pkgBuilder.addForeachStatementWithRecordVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().errorBindingPattern() != null) {
this.pkgBuilder.addForeachStatementWithErrorVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
} else {
this.pkgBuilder.addForeachStatementWithTupleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
}
}
@Override
public void exitIntRangeExpression(BallerinaParser.IntRangeExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addIntRangeExpression(getCurrentPos(ctx), getWS(ctx),
ctx.LEFT_PARENTHESIS() == null, ctx.RIGHT_PARENTHESIS() == null,
ctx.expression(1) == null);
}
/**
* {@inheritDoc}
*/
@Override
public void enterWhileStatement(BallerinaParser.WhileStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startWhileStmt();
}
/**
* {@inheritDoc}
*/
@Override
public void exitWhileStatement(BallerinaParser.WhileStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addWhileStmt(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitContinueStatement(BallerinaParser.ContinueStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addContinueStatement(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitBreakStatement(BallerinaParser.BreakStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addBreakStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterForkJoinStatement(BallerinaParser.ForkJoinStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startForkJoinStmt();
}
@Override
public void exitForkJoinStatement(BallerinaParser.ForkJoinStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addForkJoinStmt(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterTryCatchStatement(BallerinaParser.TryCatchStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startTryCatchFinallyStmt();
}
@Override
public void exitTryCatchStatement(BallerinaParser.TryCatchStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTryCatchFinallyStmt(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterCatchClauses(BallerinaParser.CatchClausesContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTryClause(getCurrentPos(ctx));
}
@Override
public void enterCatchClause(BallerinaParser.CatchClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startCatchClause();
}
@Override
public void exitCatchClause(BallerinaParser.CatchClauseContext ctx) {
if (isInErrorState) {
return;
}
String paramName = ctx.Identifier().getText();
this.pkgBuilder.addCatchClause(getCurrentPos(ctx), getWS(ctx), paramName);
}
@Override
public void enterFinallyClause(BallerinaParser.FinallyClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startFinallyBlock();
}
@Override
public void exitFinallyClause(BallerinaParser.FinallyClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addFinallyBlock(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitThrowStatement(BallerinaParser.ThrowStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addThrowStmt(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitPanicStatement(BallerinaParser.PanicStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addPanicStmt(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitReturnStatement(BallerinaParser.ReturnStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addReturnStatement(this.getCurrentPos(ctx), getWS(ctx), ctx.expression() != null);
}
@Override
public void exitWorkerReceiveExpression(BallerinaParser.WorkerReceiveExpressionContext ctx) {
if (isInErrorState) {
return;
}
String workerName = ctx.peerWorker().DEFAULT() != null ?
ctx.peerWorker().DEFAULT().getText() : ctx.peerWorker().workerName().getText();
this.pkgBuilder.addWorkerReceiveExpr(getCurrentPos(ctx), getWS(ctx), workerName, ctx.expression() != null);
}
@Override
public void exitFlushWorker(BallerinaParser.FlushWorkerContext ctx) {
if (isInErrorState) {
return;
}
String workerName = ctx.Identifier() != null ? ctx.Identifier().getText() : null;
this.pkgBuilder.addWorkerFlushExpr(getCurrentPos(ctx), getWS(ctx), workerName);
}
@Override
public void exitWorkerSendAsyncStatement(BallerinaParser.WorkerSendAsyncStatementContext ctx) {
if (isInErrorState) {
return;
}
String workerName = ctx.peerWorker().DEFAULT() != null ?
ctx.peerWorker().DEFAULT().getText() : ctx.peerWorker().workerName().getText();
this.pkgBuilder.addWorkerSendStmt(getCurrentPos(ctx), getWS(ctx), workerName, ctx.expression().size() > 1);
}
@Override
public void exitWorkerSendSyncExpression(BallerinaParser.WorkerSendSyncExpressionContext ctx) {
if (isInErrorState) {
return;
}
String workerName = ctx.peerWorker().DEFAULT() != null ?
ctx.peerWorker().DEFAULT().getText() : ctx.peerWorker().workerName().getText();
this.pkgBuilder.addWorkerSendSyncExpr(getCurrentPos(ctx), getWS(ctx), workerName);
}
@Override
public void exitWaitExpression(BallerinaParser.WaitExpressionContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.waitForCollection() != null) {
this.pkgBuilder.handleWaitForAll(getCurrentPos(ctx), getWS(ctx));
} else {
this.pkgBuilder.handleWait(getCurrentPos(ctx), getWS(ctx));
}
}
@Override
public void enterWaitForCollection(BallerinaParser.WaitForCollectionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startWaitForAll();
}
@Override
public void exitWaitKeyValue(BallerinaParser.WaitKeyValueContext ctx) {
if (isInErrorState) {
return;
}
boolean containsExpr = ctx.expression() != null;
this.pkgBuilder.addKeyValueToWaitForAll(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText(),
containsExpr);
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlAttribVariableReference(BallerinaParser.XmlAttribVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
boolean isSingleAttrRef = ctx.xmlAttrib().expression() != null;
this.pkgBuilder.createXmlAttributesRefExpr(getCurrentPos(ctx), getWS(ctx), isSingleAttrRef);
}
@Override
public void exitSimpleVariableReference(BallerinaParser.SimpleVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createSimpleVariableReference(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitInvocation(BallerinaParser.InvocationContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addInvocationWS(getWS(ctx));
}
@Override
public void exitStringFunctionInvocationReference(BallerinaParser.StringFunctionInvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
TerminalNode node = ctx.QuotedStringLiteral();
DiagnosticPos pos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
String actualText = node.getText();
actualText = actualText.substring(1, actualText.length() - 1);
actualText = StringEscapeUtils.unescapeJava(actualText);
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.STRING, actualText, node.getText());
boolean argsAvailable = ctx.invocation().invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = ctx.invocation().anyIdentifierName();
String invocation = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(ctx), getWS(ctx), invocation, argsAvailable,
getCurrentPos(identifierContext));
}
@Override
public void exitGroupStringFunctionInvocationReference(GroupStringFunctionInvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
TerminalNode node = ctx.QuotedStringLiteral();
DiagnosticPos pos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
String actualText = node.getText();
actualText = actualText.substring(1, actualText.length() - 1);
actualText = StringEscapeUtils.unescapeJava(actualText);
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.STRING, actualText, node.getText());
InvocationContext invocation = ctx.invocation();
boolean argsAvailable = invocation.invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = invocation.anyIdentifierName();
String invocationText = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(invocation), getWS(invocation), invocationText,
argsAvailable, getCurrentPos(identifierContext));
this.pkgBuilder.createGroupExpression(getCurrentPos(node), getWS(ctx));
}
@Override
public void exitFunctionInvocation(BallerinaParser.FunctionInvocationContext ctx) {
if (isInErrorState) {
return;
}
boolean argsAvailable = ctx.invocationArgList() != null;
this.pkgBuilder.createFunctionInvocation(getCurrentPos(ctx), getWS(ctx), argsAvailable);
}
@Override
public void exitFieldVariableReference(BallerinaParser.FieldVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
createFieldBasedAccessNode(ctx, ctx.field());
}
@Override
public void exitGroupFieldVariableReference(BallerinaParser.GroupFieldVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
FieldContext field = ctx.field();
VariableReferenceContext groupExpression = ctx.variableReference();
createFieldBasedAccessNode(field, field);
this.pkgBuilder.createGroupExpression(getCurrentPos(groupExpression), getWS(groupExpression));
}
private void createFieldBasedAccessNode(ParserRuleContext ctx, FieldContext field) {
String fieldName;
DiagnosticPos fieldNamePos;
FieldKind fieldType;
if (field.Identifier() != null) {
fieldName = field.Identifier().getText();
fieldNamePos = getCurrentPos(field);
fieldType = FieldKind.SINGLE;
} else {
fieldName = field.MUL().getText();
fieldNamePos = getCurrentPos(field);
fieldType = FieldKind.ALL;
}
this.pkgBuilder.createFieldBasedAccessNode(getCurrentPos(ctx), getWS(ctx), fieldName, fieldNamePos,
fieldType, field.OPTIONAL_FIELD_ACCESS() != null);
}
@Override
public void exitMapArrayVariableReference(BallerinaParser.MapArrayVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createIndexBasedAccessNode(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitGroupMapArrayVariableReference(BallerinaParser.GroupMapArrayVariableReferenceContext ctx) {
if (isInErrorState) {
return;
}
IndexContext index = ctx.index();
VariableReferenceContext groupExpression = ctx.variableReference();
this.pkgBuilder.createIndexBasedAccessNode(getCurrentPos(index), getWS(index));
this.pkgBuilder.createGroupExpression(getCurrentPos(groupExpression), getWS(groupExpression));
}
@Override
public void exitReservedWord(BallerinaParser.ReservedWordContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startInvocationNode(getWS(ctx));
}
@Override
public void exitAnyIdentifierName(BallerinaParser.AnyIdentifierNameContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.reservedWord() == null) {
this.pkgBuilder.startInvocationNode(getWS(ctx));
}
}
@Override
public void exitInvocationReference(BallerinaParser.InvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
boolean argsAvailable = ctx.invocation().invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = ctx.invocation().anyIdentifierName();
String invocation = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(ctx), getWS(ctx), invocation, argsAvailable,
getCurrentPos(identifierContext));
}
@Override
public void exitGroupInvocationReference(BallerinaParser.GroupInvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
InvocationContext invocation = ctx.invocation();
VariableReferenceContext groupExpression = ctx.variableReference();
boolean argsAvailable = invocation.invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = invocation.anyIdentifierName();
String invocationText = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(invocation), getWS(invocation),
invocationText, argsAvailable, getCurrentPos(identifierContext));
this.pkgBuilder.createGroupExpression(getCurrentPos(groupExpression), getWS(groupExpression));
}
@Override
public void exitTypeDescExprInvocationReference(BallerinaParser.TypeDescExprInvocationReferenceContext ctx) {
if (isInErrorState) {
return;
}
boolean argsAvailable = ctx.invocation().invocationArgList() != null;
BallerinaParser.AnyIdentifierNameContext identifierContext = ctx.invocation().anyIdentifierName();
String invocation = identifierContext.getText();
this.pkgBuilder.createInvocationNode(getCurrentPos(ctx), getWS(ctx), invocation, argsAvailable,
getCurrentPos(identifierContext));
}
/**
* {@inheritDoc}
*/
@Override
public void enterInvocationArgList(BallerinaParser.InvocationArgListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExprNodeList();
}
/**
* {@inheritDoc}
*/
@Override
public void exitInvocationArgList(BallerinaParser.InvocationArgListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExprNodeList(getWS(ctx), ctx.getChildCount() / 2 + 1);
}
public void enterExpressionList(BallerinaParser.ExpressionListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startExprNodeList();
}
@Override
public void exitExpressionList(BallerinaParser.ExpressionListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endExprNodeList(getWS(ctx), ctx.getChildCount() / 2 + 1);
}
@Override
public void exitExpressionStmt(BallerinaParser.ExpressionStmtContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addExpressionStmt(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterTransactionStatement(BallerinaParser.TransactionStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startTransactionStmt();
}
/**
* {@inheritDoc}
*/
@Override
public void exitTransactionStatement(BallerinaParser.TransactionStatementContext ctx) {
if (isInErrorState) {
return;
}
DiagnosticPos pos = getCurrentPos(ctx);
this.pkgBuilder.endTransactionStmt(pos, getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitTransactionClause(BallerinaParser.TransactionClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addTransactionBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitTransactionPropertyInitStatementList(
BallerinaParser.TransactionPropertyInitStatementListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endTransactionPropertyInitStatementList(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterLockStatement(BallerinaParser.LockStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startLockStmt();
}
/**
* {@inheritDoc}
*/
@Override
public void exitLockStatement(BallerinaParser.LockStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addLockStmt(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterOnretryClause(BallerinaParser.OnretryClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startOnretryBlock();
}
/**
* {@inheritDoc}
*/
@Override
public void exitOnretryClause(BallerinaParser.OnretryClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addOnretryBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterCommittedClause(BallerinaParser.CommittedClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startCommittedBlock();
}
/**
* {@inheritDoc}
*/
@Override
public void exitCommittedClause(BallerinaParser.CommittedClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endCommittedBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterAbortedClause(BallerinaParser.AbortedClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startAbortedBlock();
}
/**
* {@inheritDoc}
*/
@Override
public void exitAbortedClause(BallerinaParser.AbortedClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endAbortedBlock(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitAbortStatement(BallerinaParser.AbortStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addAbortStatement(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitRetryStatement(BallerinaParser.RetryStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRetryStatement(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitRetriesStatement(BallerinaParser.RetriesStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRetryCountExpression(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void enterNamespaceDeclaration(BallerinaParser.NamespaceDeclarationContext ctx) {
}
@Override
public void exitNamespaceDeclaration(BallerinaParser.NamespaceDeclarationContext ctx) {
if (isInErrorState) {
return;
}
boolean isTopLevel = ctx.parent instanceof BallerinaParser.CompilationUnitContext;
String namespaceUri = ctx.QuotedStringLiteral().getText();
DiagnosticPos pos = getCurrentPos(ctx);
namespaceUri = namespaceUri.substring(1, namespaceUri.length() - 1);
namespaceUri = StringEscapeUtils.unescapeJava(namespaceUri);
String prefix = (ctx.Identifier() != null) ? ctx.Identifier().getText() : null;
DiagnosticPos prefixPos = (ctx.Identifier() != null) ? getCurrentPos(ctx.Identifier()) : null;
this.pkgBuilder.addXMLNSDeclaration(pos, getWS(ctx), namespaceUri, prefix, prefixPos, isTopLevel);
}
@Override
public void exitBinaryDivMulModExpression(BallerinaParser.BinaryDivMulModExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBinaryOrExpression(BallerinaParser.BinaryOrExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBinaryRefEqualExpression(BallerinaParser.BinaryRefEqualExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBinaryEqualExpression(BallerinaParser.BinaryEqualExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitStaticMatchOrExpression(BallerinaParser.StaticMatchOrExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitStaticMatchIdentifierLiteral(BallerinaParser.StaticMatchIdentifierLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addNameReference(getCurrentPos(ctx), getWS(ctx), null, ctx.Identifier().getText());
this.pkgBuilder.createSimpleVariableReference(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitTypeDescExpr(BallerinaParser.TypeDescExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTypeAccessExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitActionInvocation(BallerinaParser.ActionInvocationContext ctx) {
if (isInErrorState) {
return;
}
int numAnnotations = ctx.annotationAttachment().size();
this.pkgBuilder.createActionInvocationNode(getCurrentPos(ctx), getWS(ctx), ctx.START() != null,
numAnnotations);
}
@Override
public void exitBinaryAndExpression(BallerinaParser.BinaryAndExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBinaryAddSubExpression(BallerinaParser.BinaryAddSubExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBitwiseExpression(BallerinaParser.BitwiseExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitBitwiseShiftExpression(BallerinaParser.BitwiseShiftExpressionContext ctx) {
if (isInErrorState) {
return;
}
StringBuilder operator = new StringBuilder();
for (int i = 1; i < ctx.getChildCount() - 1; i++) {
operator.append(ctx.getChild(i).getText());
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), operator.toString());
}
/**
* {@inheritDoc}
*/
@Override
public void exitTypeConversionExpression(BallerinaParser.TypeConversionExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTypeConversionExpr(getCurrentPos(ctx), getWS(ctx),
ctx.annotationAttachment().size(), ctx.typeName() != null);
}
@Override
public void exitBinaryCompareExpression(BallerinaParser.BinaryCompareExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitIntegerRangeExpression(BallerinaParser.IntegerRangeExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createBinaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(1).getText());
}
@Override
public void exitUnaryExpression(BallerinaParser.UnaryExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createUnaryExpr(getCurrentPos(ctx), getWS(ctx), ctx.getChild(0).getText());
}
@Override
public void exitTypeTestExpression(BallerinaParser.TypeTestExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTypeTestExpression(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitAnnotAccessExpression(BallerinaParser.AnnotAccessExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createAnnotAccessNode(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitGroupExpression(BallerinaParser.GroupExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createGroupExpression(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitTernaryExpression(BallerinaParser.TernaryExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTernaryExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitCheckedExpression(BallerinaParser.CheckedExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createCheckedExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitCheckPanickedExpression(BallerinaParser.CheckPanickedExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createCheckPanickedExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitFromClause(BallerinaParser.FromClauseContext ctx) {
if (isInErrorState) {
return;
}
boolean isDeclaredWithVar = ctx.VAR() != null;
if (ctx.bindingPattern().Identifier() != null) {
String identifier = ctx.bindingPattern().Identifier().getText();
DiagnosticPos identifierPos = getCurrentPos(ctx.bindingPattern().Identifier());
this.pkgBuilder.createFromClauseWithSimpleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
identifier, identifierPos,
isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().recordBindingPattern() != null) {
this.pkgBuilder.createFromClauseWithRecordVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
} else if (ctx.bindingPattern().structuredBindingPattern().errorBindingPattern() != null) {
this.pkgBuilder.createFromClauseWithErrorVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
} else {
this.pkgBuilder.createFromClauseWithTupleVariableDefStatement(getCurrentPos(ctx), getWS(ctx),
isDeclaredWithVar);
}
}
@Override
public void exitWhereClause(BallerinaParser.WhereClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createWhereClause(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitSelectClause(BallerinaParser.SelectClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createSelectClause(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitDoClause(BallerinaParser.DoClauseContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createDoClause(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitQueryExpr(BallerinaParser.QueryExprContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createQueryExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void enterQueryActionStatement(BallerinaParser.QueryActionStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startDoActionStatement();
}
@Override
public void exitQueryActionStatement(BallerinaParser.QueryActionStatementContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createQueryActionStatement(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitNameReference(BallerinaParser.NameReferenceContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier().size() == 2) {
String pkgName = ctx.Identifier(0).getText();
String name = ctx.Identifier(1).getText();
DiagnosticPos pos = getCurrentPos(ctx);
if (Names.IGNORE.value.equals(pkgName)) {
dlog.error(pos, DiagnosticCode.INVALID_PACKAGE_NAME_QUALIFER, pkgName);
}
this.pkgBuilder.addNameReference(pos, getWS(ctx), pkgName, name);
} else {
String name = ctx.Identifier(0).getText();
this.pkgBuilder.addNameReference(getCurrentPos(ctx), getWS(ctx), null, name);
}
}
@Override
public void exitFunctionNameReference(BallerinaParser.FunctionNameReferenceContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.Identifier() != null) {
String pkgName = ctx.Identifier().getText();
String name = ctx.anyIdentifierName().getText();
DiagnosticPos pos = getCurrentPos(ctx);
if (Names.IGNORE.value.equals(pkgName)) {
dlog.error(pos, DiagnosticCode.INVALID_PACKAGE_NAME_QUALIFER, pkgName);
}
this.pkgBuilder.addNameReference(pos, getWS(ctx), pkgName, name);
} else {
String name = ctx.anyIdentifierName().getText();
this.pkgBuilder.addNameReference(getCurrentPos(ctx), getWS(ctx), null, name);
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitReturnParameter(BallerinaParser.ReturnParameterContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addReturnParam(getCurrentPos(ctx), getWS(ctx), ctx.annotationAttachment().size());
}
@Override
public void enterParameterTypeNameList(BallerinaParser.ParameterTypeNameListContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startVarList();
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterTypeNameList(BallerinaParser.ParameterTypeNameListContext ctx) {
if (isInErrorState) {
return;
}
ParserRuleContext parent = ctx.getParent();
boolean inFuncTypeSig = parent instanceof BallerinaParser.FunctionTypeNameContext ||
parent instanceof BallerinaParser.ReturnParameterContext &&
parent.parent instanceof BallerinaParser.FunctionTypeNameContext;
if (inFuncTypeSig) {
this.pkgBuilder.endFuncTypeParamList(getWS(ctx));
} else {
this.pkgBuilder.endCallableParamList(getWS(ctx));
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterList(BallerinaParser.ParameterListContext ctx) {
if (isInErrorState) {
return;
}
ParserRuleContext parent = ctx.getParent();
boolean inFuncTypeSig = parent instanceof BallerinaParser.FunctionTypeNameContext ||
parent instanceof BallerinaParser.ReturnParameterContext &&
parent.parent instanceof BallerinaParser.FunctionTypeNameContext;
if (inFuncTypeSig) {
this.pkgBuilder.endFuncTypeParamList(getWS(ctx));
} else {
this.pkgBuilder.endCallableParamList(getWS(ctx));
}
}
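    // Left-pads a hexadecimal code point with zeros to four digits so it can
    // be rewritten as a \\uXXXX escape when normalizing string literals.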
private String fillWithZeros(String str) {
while (str.length() < 4) {
str = "0".concat(str);
}
return str;
}
/**
* {@inheritDoc}
*/
@Override
public void exitSimpleLiteral(BallerinaParser.SimpleLiteralContext ctx) {
if (isInErrorState) {
return;
}
TerminalNode node;
DiagnosticPos pos = getCurrentPos(ctx);
Set<Whitespace> ws = getWS(ctx);
Object value;
BallerinaParser.IntegerLiteralContext integerLiteralContext = ctx.integerLiteral();
if (integerLiteralContext != null && (value = getIntegerLiteral(ctx, ctx.integerLiteral())) != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.INT, value, ctx.getText());
} else if (ctx.floatingPointLiteral() != null) {
if ((node = ctx.floatingPointLiteral().DecimalFloatingPointNumber()) != null) {
String nodeValue = getNodeValue(ctx, node);
int literalTypeTag = NumericLiteralSupport.isDecimalDiscriminated(nodeValue)
? TypeTags.DECIMAL : TypeTags.FLOAT;
this.pkgBuilder.addLiteralValue(pos, ws, literalTypeTag, nodeValue, node.getText());
} else if ((node = ctx.floatingPointLiteral().HexadecimalFloatingPointLiteral()) != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.FLOAT, getHexNodeValue(ctx, node), node.getText());
}
} else if ((node = ctx.BooleanLiteral()) != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.BOOLEAN, Boolean.parseBoolean(node.getText()),
node.getText());
} else if ((node = ctx.QuotedStringLiteral()) != null) {
String text = node.getText();
text = text.substring(1, text.length() - 1);
String originalText = text;
Matcher matcher = pattern.matcher(text);
int position = 0;
while (matcher.find(position)) {
String hexStringVal = matcher.group(1);
int hexDecimalVal = Integer.parseInt(hexStringVal, 16);
if ((hexDecimalVal >= Constants.MIN_UNICODE && hexDecimalVal <= Constants.MIDDLE_LIMIT_UNICODE)
|| hexDecimalVal > Constants.MAX_UNICODE) {
String hexStringWithBraces = matcher.group(0);
int offset = originalText.indexOf(hexStringWithBraces) + 1;
dlog.error(new DiagnosticPos(diagnosticSrc, pos.sLine, pos.eLine, pos.sCol + offset,
pos.sCol + offset + hexStringWithBraces.length()),
DiagnosticCode.INVALID_UNICODE, hexStringWithBraces);
}
text = matcher.replaceFirst("\\\\u" + fillWithZeros(hexStringVal));
position = matcher.end() - 2;
matcher = pattern.matcher(text);
}
text = StringEscapeUtils.unescapeJava(text);
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.STRING, text, node.getText());
} else if (ctx.NullLiteral() != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.NIL, null, "null");
} else if (ctx.nilLiteral() != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.NIL, null, "()");
} else if (ctx.blobLiteral() != null) {
this.pkgBuilder.addLiteralValue(pos, ws, TypeTags.BYTE_ARRAY, ctx.blobLiteral().getText());
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitNamedArgs(BallerinaParser.NamedArgsContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addNamedArgument(getCurrentPos(ctx), getWS(ctx), ctx.Identifier().getText());
}
/**
* {@inheritDoc}
*/
@Override
public void exitRestArgs(BallerinaParser.RestArgsContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.addRestArgument(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlLiteral(BallerinaParser.XmlLiteralContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.attachXmlLiteralWS(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitComment(BallerinaParser.CommentContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> stringFragments = getTemplateTextFragments(ctx.XMLCommentTemplateText());
String endingString = getTemplateEndingStr(ctx.XMLCommentText());
this.pkgBuilder.createXMLCommentLiteral(getCurrentPos(ctx), getWS(ctx), stringFragments, endingString);
if (ctx.getParent() instanceof BallerinaParser.ContentContext) {
this.pkgBuilder.addChildToXMLElement(getWS(ctx));
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitElement(BallerinaParser.ElementContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.getParent() instanceof BallerinaParser.ContentContext) {
this.pkgBuilder.addChildToXMLElement(getWS(ctx));
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitStartTag(BallerinaParser.StartTagContext ctx) {
if (isInErrorState) {
return;
}
boolean isRoot = ctx.parent.parent instanceof BallerinaParser.XmlItemContext;
this.pkgBuilder.startXMLElement(getCurrentPos(ctx), getWS(ctx), isRoot);
}
/**
* {@inheritDoc}
*/
@Override
public void exitCloseTag(BallerinaParser.CloseTagContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endXMLElement(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitEmptyTag(BallerinaParser.EmptyTagContext ctx) {
if (isInErrorState) {
return;
}
boolean isRoot = ctx.parent.parent instanceof BallerinaParser.XmlItemContext;
this.pkgBuilder.startXMLElement(getCurrentPos(ctx), getWS(ctx), isRoot);
}
/**
* {@inheritDoc}
*/
@Override
public void exitProcIns(BallerinaParser.ProcInsContext ctx) {
if (isInErrorState) {
return;
}
String targetQName = ctx.XML_TAG_SPECIAL_OPEN().getText();
targetQName = targetQName.substring(2, targetQName.length() - 1);
Stack<String> textFragments = getTemplateTextFragments(ctx.XMLPITemplateText());
String endingText = getTemplateEndingStr(ctx.XMLPIText());
endingText = endingText.substring(0, endingText.length() - 2);
this.pkgBuilder.createXMLPILiteral(getCurrentPos(ctx), getWS(ctx), targetQName, textFragments, endingText);
if (ctx.getParent() instanceof BallerinaParser.ContentContext) {
this.pkgBuilder.addChildToXMLElement(getWS(ctx));
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitAttribute(BallerinaParser.AttributeContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createXMLAttribute(getCurrentPos(ctx), getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitText(BallerinaParser.TextContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> textFragments = getTemplateTextFragments(ctx.XMLTemplateText());
String endingText = getTemplateEndingStr(ctx.XMLText());
if (ctx.getParent() instanceof BallerinaParser.ContentContext) {
this.pkgBuilder.addXMLTextToElement(getCurrentPos(ctx), getWS(ctx), textFragments, endingText);
} else {
this.pkgBuilder.createXMLTextLiteral(getCurrentPos(ctx), getWS(ctx), textFragments, endingText);
}
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlSingleQuotedString(BallerinaParser.XmlSingleQuotedStringContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> stringFragments = getTemplateTextFragments(ctx.XMLSingleQuotedTemplateString());
String endingString = getTemplateEndingStr(ctx.XMLSingleQuotedString());
this.pkgBuilder.createXMLQuotedLiteral(getCurrentPos(ctx), getWS(ctx), stringFragments, endingString,
QuoteType.SINGLE_QUOTE);
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlDoubleQuotedString(BallerinaParser.XmlDoubleQuotedStringContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> stringFragments = getTemplateTextFragments(ctx.XMLDoubleQuotedTemplateString());
String endingString = getTemplateEndingStr(ctx.XMLDoubleQuotedString());
this.pkgBuilder.createXMLQuotedLiteral(getCurrentPos(ctx), getWS(ctx), stringFragments, endingString,
QuoteType.DOUBLE_QUOTE);
}
/**
* {@inheritDoc}
*/
@Override
public void exitXmlQualifiedName(BallerinaParser.XmlQualifiedNameContext ctx) {
if (isInErrorState) {
return;
}
List<TerminalNode> qnames = ctx.XMLQName();
String prefix = null;
String localname;
if (qnames.size() > 1) {
prefix = qnames.get(0).getText();
localname = qnames.get(1).getText();
} else {
localname = qnames.get(0).getText();
}
this.pkgBuilder.createXMLQName(getCurrentPos(ctx), getWS(ctx), localname, prefix);
}
/**
* {@inheritDoc}
*/
@Override
public void exitStringTemplateLiteral(BallerinaParser.StringTemplateLiteralContext ctx) {
if (isInErrorState) {
return;
}
Stack<String> stringFragments;
String endingText = null;
StringTemplateContentContext contentContext = ctx.stringTemplateContent();
if (contentContext != null) {
stringFragments = getTemplateTextFragments(contentContext.StringTemplateExpressionStart());
endingText = getTemplateEndingStr(contentContext.StringTemplateText());
} else {
stringFragments = new Stack<>();
}
this.pkgBuilder.createStringTemplateLiteral(getCurrentPos(ctx), getWS(ctx), stringFragments, endingText);
}
/**
* {@inheritDoc}
*/
@Override
public void enterDocumentationString(BallerinaParser.DocumentationStringContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.startMarkdownDocumentationString(getCurrentPos(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitDocumentationString(BallerinaParser.DocumentationStringContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endMarkdownDocumentationString(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitDocumentationLine(BallerinaParser.DocumentationLineContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endMarkDownDocumentLine(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitDocumentationContent(BallerinaParser.DocumentationContentContext ctx) {
if (isInErrorState) {
return;
}
String text = ctx.getText() != null ? ctx.getText() : "";
this.pkgBuilder.endMarkdownDocumentationText(getCurrentPos(ctx), getWS(ctx), text);
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterDocumentationLine(BallerinaParser.ParameterDocumentationLineContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.endParameterDocumentationLine(getWS(ctx));
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterDocumentation(BallerinaParser.ParameterDocumentationContext ctx) {
if (isInErrorState) {
return;
}
String parameterName = ctx.docParameterName() != null ? ctx.docParameterName().getText() : "";
String description = ctx.documentationText() != null ? ctx.documentationText().getText() : "";
this.pkgBuilder.endParameterDocumentation(getCurrentPos(ctx.docParameterName()), getWS(ctx), parameterName,
description);
}
/**
* {@inheritDoc}
*/
@Override
public void exitParameterDescriptionLine(BallerinaParser.ParameterDescriptionLineContext ctx) {
if (isInErrorState) {
return;
}
String description = ctx.documentationText() != null ? ctx.documentationText().getText() : "";
this.pkgBuilder.endParameterDocumentationDescription(getWS(ctx), description);
}
/**
* {@inheritDoc}
*/
@Override
public void exitReturnParameterDocumentation(BallerinaParser.ReturnParameterDocumentationContext ctx) {
if (isInErrorState) {
return;
}
String description = ctx.documentationText() != null ? ctx.documentationText().getText() : "";
this.pkgBuilder.endReturnParameterDocumentation(getCurrentPos(ctx.getParent()), getWS(ctx), description);
}
/**
* {@inheritDoc}
*/
@Override
public void exitReturnParameterDescriptionLine(BallerinaParser.ReturnParameterDescriptionLineContext ctx) {
if (isInErrorState) {
return;
}
String description = ctx.documentationText() != null ? ctx.documentationText().getText() : "";
this.pkgBuilder.endReturnParameterDocumentationDescription(getWS(ctx), description);
}
@Override
public void exitTrapExpression(BallerinaParser.TrapExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createTrapExpr(getCurrentPos(ctx), getWS(ctx));
}
@Override
public void exitVariableReferenceExpression(BallerinaParser.VariableReferenceExpressionContext ctx) {
if (isInErrorState) {
return;
}
if (ctx.START() != null) {
int numAnnotations = ctx.annotationAttachment().size();
this.pkgBuilder.markLastInvocationAsAsync(getCurrentPos(ctx), numAnnotations);
}
}
@Override
public void exitDocumentationReference(BallerinaParser.DocumentationReferenceContext ctx) {
if (isInErrorState) {
return;
}
BallerinaParser.ReferenceTypeContext referenceType = ctx.referenceType();
BallerinaParser.SingleBacktickedContentContext backtickedContent = ctx.singleBacktickedContent();
this.pkgBuilder.endDocumentationReference(getCurrentPos(ctx), referenceType.getText(),
backtickedContent.getText());
}
@Override
public void exitSingleBacktickedBlock(BallerinaParser.SingleBacktickedBlockContext ctx) {
if (isInErrorState) {
return;
}
BallerinaParser.SingleBacktickedContentContext backtickedContent = ctx.singleBacktickedContent();
this.pkgBuilder.endSingleBacktickedBlock(getCurrentPos(ctx), backtickedContent.getText());
}
/**
* {@inheritDoc}
*/
@Override
public void exitElvisExpression(BallerinaParser.ElvisExpressionContext ctx) {
if (isInErrorState) {
return;
}
this.pkgBuilder.createElvisExpr(getCurrentPos(ctx), getWS(ctx));
}
private DiagnosticPos getCurrentPos(ParserRuleContext ctx) {
int startLine = ctx.getStart().getLine();
int startCol = ctx.getStart().getCharPositionInLine() + 1;
int endLine = -1;
int endCol = -1;
Token stop = ctx.getStop();
if (stop != null) {
endLine = stop.getLine();
endCol = stop.getCharPositionInLine() + (stop.getStopIndex() - stop.getStartIndex() + 1) + 1;
}
return new DiagnosticPos(diagnosticSrc, startLine, endLine, startCol, endCol);
}
private DiagnosticPos getCurrentPos(TerminalNode node) {
Token symbol = node.getSymbol();
int startLine = symbol.getLine();
int startCol = symbol.getCharPositionInLine() + 1;
int endLine = startLine;
int endCol = startCol + symbol.getText().length();
return new DiagnosticPos(diagnosticSrc, startLine, endLine, startCol, endCol);
}
protected Set<Whitespace> getWS(ParserRuleContext ctx) {
return null;
}
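    // Collects the literal text fragments of a template, stripping the two
    // trailing characters that open each embedded expression.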
private Stack<String> getTemplateTextFragments(List<TerminalNode> nodes) {
Stack<String> templateStrFragments = new Stack<>();
nodes.forEach(node -> {
if (node == null) {
templateStrFragments.push(null);
} else {
String str = node.getText();
templateStrFragments.push(str.substring(0, str.length() - 2));
}
});
return templateStrFragments;
}
private String getTemplateEndingStr(TerminalNode node) {
return node == null ? null : node.getText();
}
private String getTemplateEndingStr(List<TerminalNode> nodes) {
StringJoiner joiner = new StringJoiner("");
nodes.forEach(node -> joiner.add(node.getText()));
return joiner.toString();
}
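    // Returns the literal text, carrying over a leading unary minus from the
    // enclosing context so negative literals keep their sign.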
private String getNodeValue(ParserRuleContext ctx, TerminalNode node) {
String op = ctx.getChild(0).getText();
String value = node.getText();
if (op != null && "-".equals(op)) {
value = "-" + value;
}
return value;
}
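    // Appends an explicit zero exponent ("p0") to hexadecimal floating point
    // literals that omit one, producing a complete hex float value.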
private String getHexNodeValue(ParserRuleContext ctx, TerminalNode node) {
String value = getNodeValue(ctx, node);
if (!(value.contains("p") || value.contains("P"))) {
value = value + "p0";
}
return value;
}
private Object getIntegerLiteral(ParserRuleContext simpleLiteralContext,
BallerinaParser.IntegerLiteralContext integerLiteralContext) {
if (integerLiteralContext.DecimalIntegerLiteral() != null) {
String nodeValue = getNodeValue(simpleLiteralContext, integerLiteralContext.DecimalIntegerLiteral());
return parseLong(simpleLiteralContext, nodeValue, nodeValue, 10, DiagnosticCode.INTEGER_TOO_SMALL,
DiagnosticCode.INTEGER_TOO_LARGE);
} else if (integerLiteralContext.HexIntegerLiteral() != null) {
String nodeValue = getNodeValue(simpleLiteralContext, integerLiteralContext.HexIntegerLiteral());
String processedNodeValue = nodeValue.toLowerCase().replace("0x", "");
return parseLong(simpleLiteralContext, nodeValue, processedNodeValue, 16,
DiagnosticCode.HEXADECIMAL_TOO_SMALL, DiagnosticCode.HEXADECIMAL_TOO_LARGE);
}
return null;
}
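    // Parses an integer literal in the given radix; on failure, logs a
    // too-small or too-large diagnostic based on the sign and returns the
    // original text unchanged.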
private Object parseLong(ParserRuleContext context, String originalNodeValue, String processedNodeValue, int radix,
DiagnosticCode code1, DiagnosticCode code2) {
try {
return Long.parseLong(processedNodeValue, radix);
} catch (Exception e) {
DiagnosticPos pos = getCurrentPos(context);
Set<Whitespace> ws = getWS(context);
if (originalNodeValue.startsWith("-")) {
dlog.error(pos, code1, originalNodeValue);
} else {
dlog.error(pos, code2, originalNodeValue);
}
}
return originalNodeValue;
}
/**
* Mark that this listener is in error state.
*/
public void setErrorState() {
this.isInErrorState = true;
}
/**
* Mark that this listener is not in an error state.
*/
public void unsetErrorState() {
this.isInErrorState = false;
}
boolean isInErrorState() {
return this.isInErrorState;
}
}
|
Could you confirm whether it's possible to reconnect to the old JM? If that can happen, the retained partitions will be leaked.
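A minimal sketch of the deferred-cleanup idea, using hypothetical types rather than Flink's actual API: retain the partitions on leadership loss, but release them if no JobMaster reconnects within a TTL, so a leader that never returns cannot leak them.

import java.time.Duration;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class DeferredPartitionCleanup {
    // Hypothetical tracker; stands in for the TaskExecutor's partition bookkeeping.
    interface PartitionTracker {
        boolean hasActiveConnection(String jobId);
        void releasePartitionsFor(String jobId);
    }

    private final ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();
    private final PartitionTracker partitionTracker;

    DeferredPartitionCleanup(PartitionTracker partitionTracker) {
        this.partitionTracker = partitionTracker;
    }

    void onLeadershipLost(String jobId, Duration ttl) {
        // Keep the partitions for now, but schedule a check: if neither the
        // old nor a new JobMaster has reconnected by the deadline, release.
        scheduler.schedule(() -> {
            if (!partitionTracker.hasActiveConnection(jobId)) {
                partitionTracker.releasePartitionsFor(jobId);
            }
        }, ttl.toMillis(), TimeUnit.MILLISECONDS);
    }
}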
|
public void jobManagerLostLeadership(final JobID jobId, final JobMasterId jobMasterId) {
log.info(
"JobManager for job {} with leader id {} lost leadership.", jobId, jobMasterId);
runAsync(
() -> {
Optional<JobTable.Connection> connection = jobTable.getConnection(jobId);
if (connection.isPresent()) {
Exception cause =
new Exception(
"Job leader for job id " + jobId + " lost leadership.");
if (isJobRecoveryEnabled()) {
disconnectJobManagerConnectionAndCleanupPartitionLater(
connection.get(), cause);
} else {
disconnectJobManagerConnection(connection.get(), cause);
}
}
});
}
|
disconnectJobManagerConnectionAndCleanupPartitionLater(
|
public void jobManagerLostLeadership(final JobID jobId, final JobMasterId jobMasterId) {
log.info(
"JobManager for job {} with leader id {} lost leadership.", jobId, jobMasterId);
runAsync(
() ->
jobTable.getConnection(jobId)
.ifPresent(
jobManagerConnection ->
disconnectJobManagerConnection(
jobManagerConnection,
new Exception(
"Job leader for job id "
+ jobId
+ " lost leadership."),
!shouldRetainPartitionsOnJobManagerConnectionLost())));
}
|
class JobLeaderListenerImpl implements JobLeaderListener {
@Override
public void jobManagerGainedLeadership(
final JobID jobId,
final JobMasterGateway jobManagerGateway,
final JMTMRegistrationSuccess registrationMessage) {
runAsync(
() ->
jobTable.getJob(jobId)
.ifPresent(
job ->
establishJobManagerConnection(
job,
jobManagerGateway,
registrationMessage)));
}
    @Override
public void handleError(Throwable throwable) {
onFatalError(throwable);
}
@Override
public void jobManagerRejectedRegistration(
JobID jobId, String targetAddress, JMTMRegistrationRejection rejection) {
runAsync(() -> handleRejectedJobManagerConnection(jobId, targetAddress, rejection));
}
}
|
class JobLeaderListenerImpl implements JobLeaderListener {
@Override
public void jobManagerGainedLeadership(
final JobID jobId,
final JobMasterGateway jobManagerGateway,
final JMTMRegistrationSuccess registrationMessage) {
runAsync(
() ->
jobTable.getJob(jobId)
.ifPresent(
job ->
establishJobManagerConnection(
job,
jobManagerGateway,
registrationMessage)));
}
    @Override
public void handleError(Throwable throwable) {
onFatalError(throwable);
}
@Override
public void jobManagerRejectedRegistration(
JobID jobId, String targetAddress, JMTMRegistrationRejection rejection) {
runAsync(() -> handleRejectedJobManagerConnection(jobId, targetAddress, rejection));
}
}
|
The difference between `CalciteSqlDialect` and `AnsiSqlDialect` is the method `withIdentifierQuoteString()`. Would replacing the `"` quote with a backtick have any other impact in Flink?
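A quick way to see the quoting difference in isolation, assuming Calcite's public `SqlDialect` API (expected outputs shown as comments):

import org.apache.calcite.sql.SqlDialect;
import org.apache.calcite.sql.dialect.AnsiSqlDialect;

public class QuotingDemo {
    public static void main(String[] args) {
        // ANSI-style dialect quoting identifiers with double quotes.
        SqlDialect doubleQuoted = new AnsiSqlDialect(
                SqlDialect.EMPTY_CONTEXT.withIdentifierQuoteString("\""));
        // Same dialect, but configured with the parser's backtick quoting.
        SqlDialect backtickQuoted = new AnsiSqlDialect(
                SqlDialect.EMPTY_CONTEXT.withIdentifierQuoteString("`"));
        System.out.println(doubleQuoted.quoteIdentifier("my col"));   // "my col"
        System.out.println(backtickQuoted.quoteIdentifier("my col")); // `my col`
    }
}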
|
public String toQuotedSqlString(SqlNode sqlNode) {
SqlParser.Config parserConfig = flinkPlanner.config().getParserConfig();
SqlDialect dialect =
new AnsiSqlDialect(
SqlDialect.EMPTY_CONTEXT
.withQuotedCasing(parserConfig.unquotedCasing())
.withConformance(parserConfig.conformance())
.withUnquotedCasing(parserConfig.unquotedCasing())
.withIdentifierQuoteString(parserConfig.quoting().string));
return sqlNode.toSqlString(dialect).getSql();
}
|
new AnsiSqlDialect(
|
public String toQuotedSqlString(SqlNode sqlNode) {
SqlParser.Config parserConfig = flinkPlanner.config().getParserConfig();
SqlDialect dialect =
new AnsiSqlDialect(
SqlDialect.EMPTY_CONTEXT
.withQuotedCasing(parserConfig.unquotedCasing())
.withConformance(parserConfig.conformance())
.withUnquotedCasing(parserConfig.unquotedCasing())
.withIdentifierQuoteString(parserConfig.quoting().string));
return sqlNode.toSqlString(dialect).getSql();
}
|
class SqlNodeConvertContext implements SqlNodeConverter.ConvertContext {
private final FlinkPlannerImpl flinkPlanner;
private final CatalogManager catalogManager;
public SqlNodeConvertContext(FlinkPlannerImpl flinkPlanner, CatalogManager catalogManager) {
this.flinkPlanner = flinkPlanner;
this.catalogManager = catalogManager;
}
@Override
public SqlValidator getSqlValidator() {
return flinkPlanner.getOrCreateSqlValidator();
}
@Override
public CatalogManager getCatalogManager() {
return catalogManager;
}
@Override
public RelRoot toRelRoot(SqlNode sqlNode) {
return flinkPlanner.rel(sqlNode);
}
    @Override
public String expandSqlIdentifiers(String originalSql) {
return Expander.create(flinkPlanner)
.expanded(originalSql)
.substitute(this::toQuotedSqlString);
}
}
|
class SqlNodeConvertContext implements SqlNodeConverter.ConvertContext {
private final FlinkPlannerImpl flinkPlanner;
private final CatalogManager catalogManager;
public SqlNodeConvertContext(FlinkPlannerImpl flinkPlanner, CatalogManager catalogManager) {
this.flinkPlanner = flinkPlanner;
this.catalogManager = catalogManager;
}
@Override
public SqlValidator getSqlValidator() {
return flinkPlanner.getOrCreateSqlValidator();
}
@Override
public CatalogManager getCatalogManager() {
return catalogManager;
}
@Override
public RelRoot toRelRoot(SqlNode sqlNode) {
return flinkPlanner.rel(sqlNode);
}
    @Override
public String expandSqlIdentifiers(String originalSql) {
return Expander.create(flinkPlanner)
.expanded(originalSql)
.substitute(this::toQuotedSqlString);
}
}
|
Either way, is this something that we can handle inside the Tables SDK by mapping errors to a common exception type for both Storage and Cosmos? It would be odd for users to get `TableTransactionFailedException` and `TableServiceException` for the same transaction depending on the source service.
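A hedged sketch of what such a mapping layer could look like, assuming the `com.azure.data.tables.models` exception types; the `fromServiceException` factory is hypothetical, and a real mapping would need to pull the failed-action details out of the Cosmos error body:

import com.azure.data.tables.models.TableServiceException;
import com.azure.data.tables.models.TableTransactionFailedException;
import reactor.core.publisher.Mono;

final class TransactionErrorMapper {
    // Wraps a transaction call so Cosmos surfaces the same exception type
    // as Storage for failed batches.
    static <T> Mono<T> normalize(Mono<T> transactionResponse) {
        return transactionResponse.onErrorMap(
                TableServiceException.class,
                TransactionErrorMapper::toTransactionFailed);
    }

    private static Throwable toTransactionFailed(TableServiceException e) {
        // Hypothetical factory, not current SDK API; it would build a
        // TableTransactionFailedException carrying the status code and the
        // failed-action index parsed from the service error.
        return TableTransactionFailedException.fromServiceException(e);
    }
}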
|
void submitTransactionAsyncWithSameRowKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
if (IS_COSMOS_TEST) {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableServiceException
&& e.getMessage().contains("Status code 400")
&& e.getMessage().contains("InvalidDuplicateRow")
&& e.getMessage().contains("The batch request contains multiple changes with same row key.")
&& e.getMessage().contains("An entity can appear only once in a batch request."))
.verify();
} else {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue))
.verify();
}
}
|
}
|
void submitTransactionAsyncWithSameRowKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
if (IS_COSMOS_TEST) {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableServiceException
&& e.getMessage().contains("Status code 400")
&& e.getMessage().contains("InvalidDuplicateRow")
&& e.getMessage().contains("The batch request contains multiple changes with same row key.")
&& e.getMessage().contains("An entity can appear only once in a batch request."))
.verify();
} else {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue))
.verify();
}
}
|
class TableAsyncClientTest extends TestBase {
private static final Duration TIMEOUT = Duration.ofSeconds(100);
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
private static final boolean IS_COSMOS_TEST = System.getenv("AZURE_TABLES_CONNECTION_STRING") != null
&& System.getenv("AZURE_TABLES_CONNECTION_STRING").contains("cosmos.azure.com");
private TableAsyncClient tableClient;
private HttpPipelinePolicy recordPolicy;
private HttpClient playbackClient;
private TableClientBuilder getClientBuilder(String tableName, String connectionString) {
final TableClientBuilder builder = new TableClientBuilder()
.connectionString(connectionString)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
playbackClient = interceptorManager.getPlaybackClient();
builder.httpClient(playbackClient);
} else {
builder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
recordPolicy = interceptorManager.getRecordPolicy();
builder.addPolicy(recordPolicy);
}
}
return builder;
}
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(TIMEOUT);
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@Override
protected void beforeTest() {
final String tableName = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
tableClient = getClientBuilder(tableName, connectionString).buildAsyncClient();
tableClient.createTable().block(TIMEOUT);
}
@Test
void createTableAsync() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableAsyncClient tableClient2 = getClientBuilder(tableName2, connectionString).buildAsyncClient();
StepVerifier.create(tableClient2.createTable())
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify();
}
@Test
void createTableWithResponseAsync() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableAsyncClient tableClient2 = getClientBuilder(tableName2, connectionString).buildAsyncClient();
final int expectedStatusCode = 204;
StepVerifier.create(tableClient2.createTableWithResponse())
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
})
.expectComplete()
.verify();
}
@Test
void createEntityAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
StepVerifier.create(tableClient.createEntity(tableEntity))
.expectComplete()
.verify();
}
@Test
void createEntityWithResponseAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void createEntityWithAllSupportedDataTypesAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final boolean booleanValue = true;
final byte[] binaryValue = "Test value".getBytes();
final Date dateValue = new Date();
final OffsetDateTime offsetDateTimeValue = OffsetDateTime.now();
final double doubleValue = 2.0d;
final UUID guidValue = UUID.randomUUID();
final int int32Value = 1337;
final long int64Value = 1337L;
final String stringValue = "This is table entity";
tableEntity.addProperty("BinaryTypeProperty", binaryValue);
tableEntity.addProperty("BooleanTypeProperty", booleanValue);
tableEntity.addProperty("DateTypeProperty", dateValue);
tableEntity.addProperty("OffsetDateTimeTypeProperty", offsetDateTimeValue);
tableEntity.addProperty("DoubleTypeProperty", doubleValue);
tableEntity.addProperty("GuidTypeProperty", guidValue);
tableEntity.addProperty("Int32TypeProperty", int32Value);
tableEntity.addProperty("Int64TypeProperty", int64Value);
tableEntity.addProperty("StringTypeProperty", stringValue);
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.get("BinaryTypeProperty") instanceof byte[]);
assertTrue(properties.get("BooleanTypeProperty") instanceof Boolean);
assertTrue(properties.get("DateTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("OffsetDateTimeTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("DoubleTypeProperty") instanceof Double);
assertTrue(properties.get("GuidTypeProperty") instanceof UUID);
assertTrue(properties.get("Int32TypeProperty") instanceof Integer);
assertTrue(properties.get("Int64TypeProperty") instanceof Long);
assertTrue(properties.get("StringTypeProperty") instanceof String);
})
.expectComplete()
.verify();
}
/*@Test
void createEntitySubclassAsync() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
SampleEntity tableEntity = new SampleEntity(partitionKeyValue, rowKeyValue);
tableEntity.setByteField(bytes);
tableEntity.setBooleanField(b);
tableEntity.setDateTimeField(dateTime);
tableEntity.setDoubleField(d);
tableEntity.setUuidField(uuid);
tableEntity.setIntField(i);
tableEntity.setLongField(l);
tableEntity.setStringField(s);
tableEntity.setEnumField(color);
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
TableEntity entity = response.getValue();
assertArrayEquals((byte[]) entity.getProperties().get("ByteField"), bytes);
assertEquals(entity.getProperties().get("BooleanField"), b);
assertTrue(dateTime.isEqual((OffsetDateTime) entity.getProperties().get("DateTimeField")));
assertEquals(entity.getProperties().get("DoubleField"), d);
assertEquals(0, uuid.compareTo((UUID) entity.getProperties().get("UuidField")));
assertEquals(entity.getProperties().get("IntField"), i);
assertEquals(entity.getProperties().get("LongField"), l);
assertEquals(entity.getProperties().get("StringField"), s);
assertEquals(entity.getProperties().get("EnumField"), color.name());
})
.expectComplete()
.verify();
}*/
@Test
void deleteTableAsync() {
StepVerifier.create(tableClient.deleteTable())
.expectComplete()
.verify();
}
@Test
void deleteNonExistingTableAsync() {
tableClient.deleteTable().block();
StepVerifier.create(tableClient.deleteTable())
.expectComplete()
.verify();
}
@Test
void deleteTableWithResponseAsync() {
final int expectedStatusCode = 204;
StepVerifier.create(tableClient.deleteTableWithResponse())
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
})
.expectComplete()
.verify();
}
@Test
void deleteNonExistingTableWithResponseAsync() {
final int expectedStatusCode = 404;
tableClient.deleteTableWithResponse().block(TIMEOUT);
StepVerifier.create(tableClient.deleteTableWithResponse())
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void deleteEntityAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableClient.createEntity(tableEntity).block(TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue))
.expectComplete()
.verify();
}
@Test
void deleteNonExistingEntityAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue))
.expectComplete()
.verify();
}
@Test
void deleteEntityWithResponseAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity).block(TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntityWithResponse(createdEntity, false))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void deleteNonExistingEntityWithResponseAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 404;
StepVerifier.create(tableClient.deleteEntityWithResponse(entity, false))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void deleteEntityWithResponseMatchETagAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity).block(TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntityWithResponse(createdEntity, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void getEntityWithResponseAsync() {
getEntityWithResponseAsyncImpl(this.tableClient, this.testResourceNamer);
}
static void getEntityWithResponseAsyncImpl(TableAsyncClient tableClient, TestResourceNamer testResourceNamer) {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
})
.expectComplete()
.verify();
}
@Test
void getEntityWithResponseWithSelectAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.addProperty("Test", "Value");
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(TIMEOUT);
List<String> propertyList = new ArrayList<>();
propertyList.add("Test");
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, propertyList))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertNull(entity.getPartitionKey());
assertNull(entity.getRowKey());
assertNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertEquals(entity.getProperties().get("Test"), "Value");
})
.expectComplete()
.verify();
}
/*@Test
void getEntityWithResponseSubclassAsync() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
final Map<String, Object> props = new HashMap<>();
props.put("ByteField", bytes);
props.put("BooleanField", b);
props.put("DateTimeField", dateTime);
props.put("DoubleField", d);
props.put("UuidField", uuid);
props.put("IntField", i);
props.put("LongField", l);
props.put("StringField", s);
props.put("EnumField", color);
TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.setProperties(props);
int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, SampleEntity.class))
.assertNext(response -> {
SampleEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertArrayEquals(bytes, entity.getByteField());
assertEquals(b, entity.getBooleanField());
assertTrue(dateTime.isEqual(entity.getDateTimeField()));
assertEquals(d, entity.getDoubleField());
assertEquals(0, uuid.compareTo(entity.getUuidField()));
assertEquals(i, entity.getIntField());
assertEquals(l, entity.getLongField());
assertEquals(s, entity.getStringField());
assertEquals(color, entity.getEnumField());
})
.expectComplete()
.verify();
}*/
@Test
void updateEntityWithResponseReplaceAsync() {
updateEntityWithResponseAsync(TableEntityUpdateMode.REPLACE);
}
@Test
void updateEntityWithResponseMergeAsync() {
updateEntityWithResponseAsync(TableEntityUpdateMode.MERGE);
}
/**
 * In the case of {@link TableEntityUpdateMode#MERGE}, we expect both the old and the new property to exist on the updated entity.
 * In the case of {@link TableEntityUpdateMode#REPLACE}, we expect only the new property to exist.
 */
void updateEntityWithResponseAsync(TableEntityUpdateMode mode) {
final boolean expectOldProperty = mode == TableEntityUpdateMode.MERGE;
final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
final int expectedStatusCode = 204;
final String oldPropertyKey = "propertyA";
final String newPropertyKey = "propertyB";
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty(oldPropertyKey, "valueA");
tableClient.createEntity(tableEntity).block(TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
createdEntity.getProperties().remove(oldPropertyKey);
createdEntity.addProperty(newPropertyKey, "valueB");
StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, mode, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue))
.assertNext(entity -> {
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey(newPropertyKey));
assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey));
})
.verifyComplete();
}
/*@Test
void updateEntityWithResponseSubclassAsync() {
String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
int expectedStatusCode = 204;
SingleFieldEntity tableEntity = new SingleFieldEntity(partitionKeyValue, rowKeyValue);
tableEntity.setSubclassProperty("InitialValue");
tableClient.createEntity(tableEntity).block(TIMEOUT);
tableEntity.setSubclassProperty("UpdatedValue");
StepVerifier.create(tableClient.updateEntityWithResponse(tableEntity, TableEntityUpdateMode.REPLACE, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue))
.assertNext(entity -> {
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey("SubclassProperty"));
assertEquals("UpdatedValue", properties.get("SubclassProperty"));
})
.verifyComplete();
}*/
@Test
void listEntitiesAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities())
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify();
}
@Test
void listEntitiesWithFilterAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'");
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.assertNext(returnEntity -> {
assertEquals(partitionKeyValue, returnEntity.getPartitionKey());
assertEquals(rowKeyValue, returnEntity.getRowKey());
})
.expectNextCount(0)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify();
}
@Test
void listEntitiesWithSelectAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty("propertyC", "valueC")
.addProperty("propertyD", "valueD");
List<String> propertyList = new ArrayList<>();
propertyList.add("propertyC");
ListEntitiesOptions options = new ListEntitiesOptions()
.setSelect(propertyList);
tableClient.createEntity(entity).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.assertNext(returnEntity -> {
assertNull(returnEntity.getRowKey());
assertNull(returnEntity.getPartitionKey());
assertEquals("valueC", returnEntity.getProperties().get("propertyC"));
assertNull(returnEntity.getProperties().get("propertyD"));
})
.expectComplete()
.verify();
}
@Test
void listEntitiesWithTopAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify();
}
/*@Test
void listEntitiesSubclassAsync() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(SampleEntity.class))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify();
}*/
@Test
void submitTransactionAsync() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue2)));
final Response<TableTransactionResult> result =
tableClient.submitTransactionWithResponse(transactionalBatch).block(TIMEOUT);
assertNotNull(result);
assertEquals(expectedBatchStatusCode, result.getStatusCode());
assertEquals(transactionalBatch.size(), result.getValue().getTransactionActionResponses().size());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(0).getStatusCode());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(1).getStatusCode());
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertNotNull(entity);
assertEquals(partitionKeyValue, entity.getPartitionKey());
assertEquals(rowKeyValue, entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
})
.expectComplete()
.verify();
}
@Test
void submitTransactionAsyncAllActions() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValueCreate = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertInsert = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertMerge = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertReplace = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpdateMerge = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpdateReplace = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueDelete = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueDelete)).block(TIMEOUT);
TableEntity toUpsertMerge = new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge);
toUpsertMerge.addProperty("Test", "MergedValue");
TableEntity toUpsertReplace = new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace);
toUpsertReplace.addProperty("Test", "ReplacedValue");
TableEntity toUpdateMerge = new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge);
toUpdateMerge.addProperty("Test", "MergedValue");
TableEntity toUpdateReplace = new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace);
toUpdateReplace.addProperty("Test", "MergedValue");
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValueCreate)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE,
new TableEntity(partitionKeyValue, rowKeyValueUpsertInsert)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE, toUpsertMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_REPLACE, toUpsertReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_MERGE, toUpdateMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_REPLACE, toUpdateReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValueDelete)));
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.assertNext(response -> {
assertNotNull(response);
assertEquals(expectedBatchStatusCode, response.getStatusCode());
TableTransactionResult result = response.getValue();
assertEquals(transactionalBatch.size(), result.getTransactionActionResponses().size());
for (TableTransactionActionResponse subResponse : result.getTransactionActionResponses()) {
assertEquals(expectedOperationStatusCode, subResponse.getStatusCode());
}
})
.expectComplete()
.verify();
}
@Test
void submitTransactionAsyncWithFailingAction() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValue2)));
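// The DELETE targets an entity that was never created, so the whole transaction is expected to
// fail, with the error message identifying the offending action by partition and row key.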
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("DeleteEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue2))
.verify();
}
@Test
void submitTransactionAsyncWithDifferentPartitionKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String partitionKeyValue2 = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue2, rowKeyValue2)));
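// All actions in a transaction must share a single partition key. The two backends appear to
// report different failing actions: Cosmos flags the first entity, Storage flags the second.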
if (IS_COSMOS_TEST) {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue))
.verify();
} else {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue2)
&& e.getMessage().contains("rowKey='" + rowKeyValue2))
.verify();
}
}
@Test
public void generateSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("r");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
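// Expected SAS query layout: sv (service version), se (expiry time), tn (table name),
// sp (permissions), spr (allowed protocols), then the sig signature itself.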
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("raud");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final String startPartitionKey = "startPartitionKey";
final String startRowKey = "startRowKey";
final String endPartitionKey = "endPartitionKey";
final String endRowKey = "endRowKey";
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange)
.setStartPartitionKey(startPartitionKey)
.setStartRowKey(startRowKey)
.setEndPartitionKey(endPartitionKey)
.setEndRowKey(endRowKey);
final String sas = tableClient.generateSas(sasSignatureValues);
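// In addition to the minimum parameters, this SAS carries st (start time), spk/srk and epk/erk
// (the partition/row key range the token is scoped to) and sip (the permitted IP range).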
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=raud"
+ "&spk=startPartitionKey"
+ "&srk=startRowKey"
+ "&epk=endPartitionKey"
+ "&erk=endRowKey"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
public void canUseSasTokenToCreateValidTableClient() {
Assumptions.assumeFalse(IS_COSMOS_TEST, "SAS Tokens are not supported for Cosmos endpoints.");
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("a");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(tableClient.getTableEndpoint())
.sasToken(sas)
.tableName(tableClient.getTableName());
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
public void setAndListAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints.");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id = "testPolicy";
TableSignedIdentifier tableSignedIdentifier = new TableSignedIdentifier(id).setAccessPolicy(tableAccessPolicy);
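// A signed identifier ties the stored access policy to an ID; SAS tokens can later reference
// that ID instead of embedding start/expiry/permissions inline.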
StepVerifier.create(tableClient.setAccessPoliciesWithResponse(Collections.singletonList(tableSignedIdentifier)))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify();
StepVerifier.create(tableClient.getAccessPolicies())
.assertNext(tableAccessPolicies -> {
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
TableSignedIdentifier signedIdentifier = tableAccessPolicies.getIdentifiers().get(0);
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
assertEquals(id, signedIdentifier.getId());
})
.expectComplete()
.verify();
}
@Test
public void setAndListMultipleAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id1 = "testPolicy1";
String id2 = "testPolicy2";
List<TableSignedIdentifier> tableSignedIdentifiers = new ArrayList<>();
tableSignedIdentifiers.add(new TableSignedIdentifier(id1).setAccessPolicy(tableAccessPolicy));
tableSignedIdentifiers.add(new TableSignedIdentifier(id2).setAccessPolicy(tableAccessPolicy));
StepVerifier.create(tableClient.setAccessPoliciesWithResponse(tableSignedIdentifiers))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify();
StepVerifier.create(tableClient.getAccessPolicies())
.assertNext(tableAccessPolicies -> {
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
assertEquals(2, tableAccessPolicies.getIdentifiers().size());
assertEquals(id1, tableAccessPolicies.getIdentifiers().get(0).getId());
assertEquals(id2, tableAccessPolicies.getIdentifiers().get(1).getId());
for (TableSignedIdentifier signedIdentifier : tableAccessPolicies.getIdentifiers()) {
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
}
})
.expectComplete()
.verify();
}
}
|
class TableAsyncClientTest extends TestBase {
private static final Duration TIMEOUT = Duration.ofSeconds(100);
private static final HttpClient DEFAULT_HTTP_CLIENT = HttpClient.createDefault();
private static final boolean IS_COSMOS_TEST = System.getenv("AZURE_TABLES_CONNECTION_STRING") != null
&& System.getenv("AZURE_TABLES_CONNECTION_STRING").contains("cosmos.azure.com");
private TableAsyncClient tableClient;
private HttpPipelinePolicy recordPolicy;
private HttpClient playbackClient;
private TableClientBuilder getClientBuilder(String tableName, String connectionString) {
final TableClientBuilder builder = new TableClientBuilder()
.connectionString(connectionString)
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.tableName(tableName);
if (interceptorManager.isPlaybackMode()) {
playbackClient = interceptorManager.getPlaybackClient();
builder.httpClient(playbackClient);
} else {
builder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
recordPolicy = interceptorManager.getRecordPolicy();
builder.addPolicy(recordPolicy);
}
}
return builder;
}
@BeforeAll
static void beforeAll() {
StepVerifier.setDefaultTimeout(TIMEOUT);
}
@AfterAll
static void afterAll() {
StepVerifier.resetDefaultTimeout();
}
@Override
protected void beforeTest() {
final String tableName = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
tableClient = getClientBuilder(tableName, connectionString).buildAsyncClient();
tableClient.createTable().block(TIMEOUT);
}
@Test
void createTableAsync() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableAsyncClient tableClient2 = getClientBuilder(tableName2, connectionString).buildAsyncClient();
StepVerifier.create(tableClient2.createTable())
.assertNext(Assertions::assertNotNull)
.expectComplete()
.verify();
}
@Test
void createTableWithResponseAsync() {
final String tableName2 = testResourceNamer.randomName("tableName", 20);
final String connectionString = TestUtils.getConnectionString(interceptorManager.isPlaybackMode());
final TableAsyncClient tableClient2 = getClientBuilder(tableName2, connectionString).buildAsyncClient();
final int expectedStatusCode = 204;
StepVerifier.create(tableClient2.createTableWithResponse())
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
})
.expectComplete()
.verify();
}
@Test
void createEntityAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
StepVerifier.create(tableClient.createEntity(tableEntity))
.expectComplete()
.verify();
}
@Test
void createEntityWithResponseAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void createEntityWithAllSupportedDataTypesAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final boolean booleanValue = true;
final byte[] binaryValue = "Test value".getBytes();
final Date dateValue = new Date();
final OffsetDateTime offsetDateTimeValue = OffsetDateTime.now();
final double doubleValue = 2.0d;
final UUID guidValue = UUID.randomUUID();
final int int32Value = 1337;
final long int64Value = 1337L;
final String stringValue = "This is table entity";
tableEntity.addProperty("BinaryTypeProperty", binaryValue);
tableEntity.addProperty("BooleanTypeProperty", booleanValue);
tableEntity.addProperty("DateTypeProperty", dateValue);
tableEntity.addProperty("OffsetDateTimeTypeProperty", offsetDateTimeValue);
tableEntity.addProperty("DoubleTypeProperty", doubleValue);
tableEntity.addProperty("GuidTypeProperty", guidValue);
tableEntity.addProperty("Int32TypeProperty", int32Value);
tableEntity.addProperty("Int64TypeProperty", int64Value);
tableEntity.addProperty("StringTypeProperty", stringValue);
tableClient.createEntity(tableEntity).block(TIMEOUT);
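// Note: a java.util.Date is expected to round-trip through the service as an OffsetDateTime,
// which is why the DateTypeProperty assertion below checks for OffsetDateTime rather than Date.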
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.get("BinaryTypeProperty") instanceof byte[]);
assertTrue(properties.get("BooleanTypeProperty") instanceof Boolean);
assertTrue(properties.get("DateTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("OffsetDateTimeTypeProperty") instanceof OffsetDateTime);
assertTrue(properties.get("DoubleTypeProperty") instanceof Double);
assertTrue(properties.get("GuidTypeProperty") instanceof UUID);
assertTrue(properties.get("Int32TypeProperty") instanceof Integer);
assertTrue(properties.get("Int64TypeProperty") instanceof Long);
assertTrue(properties.get("StringTypeProperty") instanceof String);
})
.expectComplete()
.verify();
}
/*@Test
void createEntitySubclassAsync() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
SampleEntity tableEntity = new SampleEntity(partitionKeyValue, rowKeyValue);
tableEntity.setByteField(bytes);
tableEntity.setBooleanField(b);
tableEntity.setDateTimeField(dateTime);
tableEntity.setDoubleField(d);
tableEntity.setUuidField(uuid);
tableEntity.setIntField(i);
tableEntity.setLongField(l);
tableEntity.setStringField(s);
tableEntity.setEnumField(color);
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
TableEntity entity = response.getValue();
assertArrayEquals((byte[]) entity.getProperties().get("ByteField"), bytes);
assertEquals(entity.getProperties().get("BooleanField"), b);
assertTrue(dateTime.isEqual((OffsetDateTime) entity.getProperties().get("DateTimeField")));
assertEquals(entity.getProperties().get("DoubleField"), d);
assertEquals(0, uuid.compareTo((UUID) entity.getProperties().get("UuidField")));
assertEquals(entity.getProperties().get("IntField"), i);
assertEquals(entity.getProperties().get("LongField"), l);
assertEquals(entity.getProperties().get("StringField"), s);
assertEquals(entity.getProperties().get("EnumField"), color.name());
})
.expectComplete()
.verify();
}*/
@Test
void deleteTableAsync() {
StepVerifier.create(tableClient.deleteTable())
.expectComplete()
.verify();
}
@Test
void deleteNonExistingTableAsync() {
tableClient.deleteTable().block(TIMEOUT);
StepVerifier.create(tableClient.deleteTable())
.expectComplete()
.verify();
}
@Test
void deleteTableWithResponseAsync() {
final int expectedStatusCode = 204;
StepVerifier.create(tableClient.deleteTableWithResponse())
.assertNext(response -> {
assertEquals(expectedStatusCode, response.getStatusCode());
})
.expectComplete()
.verify();
}
@Test
void deleteNonExistingTableWithResponseAsync() {
final int expectedStatusCode = 404;
tableClient.deleteTableWithResponse().block(TIMEOUT);
StepVerifier.create(tableClient.deleteTableWithResponse())
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void deleteEntityAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableClient.createEntity(tableEntity).block(TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue))
.expectComplete()
.verify();
}
@Test
void deleteNonExistingEntityAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
StepVerifier.create(tableClient.deleteEntity(partitionKeyValue, rowKeyValue))
.expectComplete()
.verify();
}
@Test
void deleteEntityWithResponseAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity).block(TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntityWithResponse(createdEntity, false))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void deleteNonExistingEntityWithResponseAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 404;
StepVerifier.create(tableClient.deleteEntityWithResponse(entity, false))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void deleteEntityWithResponseMatchETagAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
tableClient.createEntity(tableEntity).block(TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
StepVerifier.create(tableClient.deleteEntityWithResponse(createdEntity, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
void getEntityWithResponseAsync() {
getEntityWithResponseAsyncImpl(this.tableClient, this.testResourceNamer);
}
static void getEntityWithResponseAsyncImpl(TableAsyncClient tableClient, TestResourceNamer testResourceNamer) {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
})
.expectComplete()
.verify();
}
@Test
void getEntityWithResponseWithSelectAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.addProperty("Test", "Value");
final int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(TIMEOUT);
List<String> propertyList = new ArrayList<>();
propertyList.add("Test");
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, propertyList))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertNull(entity.getPartitionKey());
assertNull(entity.getRowKey());
assertNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertEquals(entity.getProperties().get("Test"), "Value");
})
.expectComplete()
.verify();
}
/*@Test
void getEntityWithResponseSubclassAsync() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
byte[] bytes = new byte[]{1, 2, 3};
boolean b = true;
OffsetDateTime dateTime = OffsetDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
double d = 1.23D;
UUID uuid = UUID.fromString("11111111-2222-3333-4444-555555555555");
int i = 123;
long l = 123L;
String s = "Test";
SampleEntity.Color color = SampleEntity.Color.GREEN;
final Map<String, Object> props = new HashMap<>();
props.put("ByteField", bytes);
props.put("BooleanField", b);
props.put("DateTimeField", dateTime);
props.put("DoubleField", d);
props.put("UuidField", uuid);
props.put("IntField", i);
props.put("LongField", l);
props.put("StringField", s);
props.put("EnumField", color);
TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue);
tableEntity.setProperties(props);
int expectedStatusCode = 200;
tableClient.createEntity(tableEntity).block(TIMEOUT);
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null, SampleEntity.class))
.assertNext(response -> {
SampleEntity entity = response.getValue();
assertEquals(expectedStatusCode, response.getStatusCode());
assertNotNull(entity);
assertEquals(tableEntity.getPartitionKey(), entity.getPartitionKey());
assertEquals(tableEntity.getRowKey(), entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertArrayEquals(bytes, entity.getByteField());
assertEquals(b, entity.getBooleanField());
assertTrue(dateTime.isEqual(entity.getDateTimeField()));
assertEquals(d, entity.getDoubleField());
assertEquals(0, uuid.compareTo(entity.getUuidField()));
assertEquals(i, entity.getIntField());
assertEquals(l, entity.getLongField());
assertEquals(s, entity.getStringField());
assertEquals(color, entity.getEnumField());
})
.expectComplete()
.verify();
}*/
@Test
void updateEntityWithResponseReplaceAsync() {
updateEntityWithResponseAsync(TableEntityUpdateMode.REPLACE);
}
@Test
void updateEntityWithResponseMergeAsync() {
updateEntityWithResponseAsync(TableEntityUpdateMode.MERGE);
}
/**
 * In the case of {@link TableEntityUpdateMode#MERGE}, we expect both the old and the new property to exist on the updated entity.
 * In the case of {@link TableEntityUpdateMode#REPLACE}, we expect only the new property to exist.
 */
void updateEntityWithResponseAsync(TableEntityUpdateMode mode) {
final boolean expectOldProperty = mode == TableEntityUpdateMode.MERGE;
final String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
final int expectedStatusCode = 204;
final String oldPropertyKey = "propertyA";
final String newPropertyKey = "propertyB";
final TableEntity tableEntity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty(oldPropertyKey, "valueA");
tableClient.createEntity(tableEntity).block(TIMEOUT);
final TableEntity createdEntity = tableClient.getEntity(partitionKeyValue, rowKeyValue).block(TIMEOUT);
assertNotNull(createdEntity, "'createdEntity' should not be null.");
assertNotNull(createdEntity.getETag(), "'eTag' should not be null.");
createdEntity.getProperties().remove(oldPropertyKey);
createdEntity.addProperty(newPropertyKey, "valueB");
StepVerifier.create(tableClient.updateEntityWithResponse(createdEntity, mode, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue))
.assertNext(entity -> {
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey(newPropertyKey));
assertEquals(expectOldProperty, properties.containsKey(oldPropertyKey));
})
.verifyComplete();
}
/*@Test
void updateEntityWithResponseSubclassAsync() {
String partitionKeyValue = testResourceNamer.randomName("APartitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("ARowKey", 20);
int expectedStatusCode = 204;
SingleFieldEntity tableEntity = new SingleFieldEntity(partitionKeyValue, rowKeyValue);
tableEntity.setSubclassProperty("InitialValue");
tableClient.createEntity(tableEntity).block(TIMEOUT);
tableEntity.setSubclassProperty("UpdatedValue");
StepVerifier.create(tableClient.updateEntityWithResponse(tableEntity, TableEntityUpdateMode.REPLACE, true))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
StepVerifier.create(tableClient.getEntity(partitionKeyValue, rowKeyValue))
.assertNext(entity -> {
final Map<String, Object> properties = entity.getProperties();
assertTrue(properties.containsKey("SubclassProperty"));
assertEquals("UpdatedValue", properties.get("SubclassProperty"));
})
.verifyComplete();
}*/
@Test
void listEntitiesAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities())
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify();
}
@Test
void listEntitiesWithFilterAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setFilter("RowKey eq '" + rowKeyValue + "'");
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.assertNext(returnEntity -> {
assertEquals(partitionKeyValue, returnEntity.getPartitionKey());
assertEquals(rowKeyValue, returnEntity.getRowKey());
})
.expectNextCount(0)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify();
}
@Test
void listEntitiesWithSelectAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue)
.addProperty("propertyC", "valueC")
.addProperty("propertyD", "valueD");
List<String> propertyList = new ArrayList<>();
propertyList.add("propertyC");
ListEntitiesOptions options = new ListEntitiesOptions()
.setSelect(propertyList);
tableClient.createEntity(entity).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.assertNext(returnEntity -> {
assertNull(returnEntity.getRowKey());
assertNull(returnEntity.getPartitionKey());
assertEquals("valueC", returnEntity.getProperties().get("propertyC"));
assertNull(returnEntity.getProperties().get("propertyD"));
})
.expectComplete()
.verify();
}
@Test
void listEntitiesWithTopAsync() {
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
final String rowKeyValue3 = testResourceNamer.randomName("rowKey", 20);
ListEntitiesOptions options = new ListEntitiesOptions().setTop(2);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue3)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(options))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify();
}
/*@Test
void listEntitiesSubclassAsync() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValue2)).block(TIMEOUT);
StepVerifier.create(tableClient.listEntities(SampleEntity.class))
.expectNextCount(2)
.thenConsumeWhile(x -> true)
.expectComplete()
.verify();
}*/
@Test
void submitTransactionAsync() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue2)));
final Response<TableTransactionResult> result =
tableClient.submitTransactionWithResponse(transactionalBatch).block(TIMEOUT);
assertNotNull(result);
assertEquals(expectedBatchStatusCode, result.getStatusCode());
assertEquals(transactionalBatch.size(), result.getValue().getTransactionActionResponses().size());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(0).getStatusCode());
assertEquals(expectedOperationStatusCode,
result.getValue().getTransactionActionResponses().get(1).getStatusCode());
StepVerifier.create(tableClient.getEntityWithResponse(partitionKeyValue, rowKeyValue, null))
.assertNext(response -> {
final TableEntity entity = response.getValue();
assertNotNull(entity);
assertEquals(partitionKeyValue, entity.getPartitionKey());
assertEquals(rowKeyValue, entity.getRowKey());
assertNotNull(entity.getTimestamp());
assertNotNull(entity.getETag());
assertNotNull(entity.getProperties());
})
.expectComplete()
.verify();
}
@Test
void submitTransactionAsyncAllActions() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValueCreate = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertInsert = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertMerge = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpsertReplace = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpdateMerge = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueUpdateReplace = testResourceNamer.randomName("rowKey", 20);
String rowKeyValueDelete = testResourceNamer.randomName("rowKey", 20);
int expectedBatchStatusCode = 202;
int expectedOperationStatusCode = 204;
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace)).block(TIMEOUT);
tableClient.createEntity(new TableEntity(partitionKeyValue, rowKeyValueDelete)).block(TIMEOUT);
TableEntity toUpsertMerge = new TableEntity(partitionKeyValue, rowKeyValueUpsertMerge);
toUpsertMerge.addProperty("Test", "MergedValue");
TableEntity toUpsertReplace = new TableEntity(partitionKeyValue, rowKeyValueUpsertReplace);
toUpsertReplace.addProperty("Test", "ReplacedValue");
TableEntity toUpdateMerge = new TableEntity(partitionKeyValue, rowKeyValueUpdateMerge);
toUpdateMerge.addProperty("Test", "MergedValue");
TableEntity toUpdateReplace = new TableEntity(partitionKeyValue, rowKeyValueUpdateReplace);
toUpdateReplace.addProperty("Test", "MergedValue");
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValueCreate)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE,
new TableEntity(partitionKeyValue, rowKeyValueUpsertInsert)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_MERGE, toUpsertMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPSERT_REPLACE, toUpsertReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_MERGE, toUpdateMerge));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.UPDATE_REPLACE, toUpdateReplace));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValueDelete)));
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.assertNext(response -> {
assertNotNull(response);
assertEquals(expectedBatchStatusCode, response.getStatusCode());
TableTransactionResult result = response.getValue();
assertEquals(transactionalBatch.size(), result.getTransactionActionResponses().size());
for (TableTransactionActionResponse subResponse : result.getTransactionActionResponses()) {
assertEquals(expectedOperationStatusCode, subResponse.getStatusCode());
}
})
.expectComplete()
.verify();
}
@Test
void submitTransactionAsyncWithFailingAction() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.CREATE,
new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(TableTransactionActionType.DELETE,
new TableEntity(partitionKeyValue, rowKeyValue2)));
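// The DELETE targets an entity that was never created, so the whole transaction is expected to
// fail, with the error message identifying the offending action by partition and row key.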
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("DeleteEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue2))
.verify();
}
@Test
void submitTransactionAsyncWithDifferentPartitionKeys() {
String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
String partitionKeyValue2 = testResourceNamer.randomName("partitionKey", 20);
String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
String rowKeyValue2 = testResourceNamer.randomName("rowKey", 20);
List<TableTransactionAction> transactionalBatch = new ArrayList<>();
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue, rowKeyValue)));
transactionalBatch.add(new TableTransactionAction(
TableTransactionActionType.CREATE, new TableEntity(partitionKeyValue2, rowKeyValue2)));
if (IS_COSMOS_TEST) {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue)
&& e.getMessage().contains("rowKey='" + rowKeyValue))
.verify();
} else {
StepVerifier.create(tableClient.submitTransactionWithResponse(transactionalBatch))
.expectErrorMatches(e -> e instanceof TableTransactionFailedException
&& e.getMessage().contains("An action within the operation failed")
&& e.getMessage().contains("The failed operation was")
&& e.getMessage().contains("CreateEntity")
&& e.getMessage().contains("partitionKey='" + partitionKeyValue2)
&& e.getMessage().contains("rowKey='" + rowKeyValue2))
.verify();
}
}
@Test
public void generateSasTokenWithMinimumParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("r");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_ONLY;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=r"
+ "&spr=https"
+ "&sig="
)
);
}
@Test
public void generateSasTokenWithAllParameters() {
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("raud");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final OffsetDateTime startTime = OffsetDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasIpRange ipRange = TableSasIpRange.parse("a-b");
final String startPartitionKey = "startPartitionKey";
final String startRowKey = "startRowKey";
final String endPartitionKey = "endPartitionKey";
final String endRowKey = "endRowKey";
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion())
.setStartTime(startTime)
.setSasIpRange(ipRange)
.setStartPartitionKey(startPartitionKey)
.setStartRowKey(startRowKey)
.setEndPartitionKey(endPartitionKey)
.setEndRowKey(endRowKey);
final String sas = tableClient.generateSas(sasSignatureValues);
assertTrue(
sas.startsWith(
"sv=2019-02-02"
+ "&st=2015-01-01T00%3A00%3A00Z"
+ "&se=2021-12-12T00%3A00%3A00Z"
+ "&tn=" + tableClient.getTableName()
+ "&sp=raud"
+ "&spk=startPartitionKey"
+ "&srk=startRowKey"
+ "&epk=endPartitionKey"
+ "&erk=endRowKey"
+ "&sip=a-b"
+ "&spr=https%2Chttp"
+ "&sig="
)
);
}
@Test
public void canUseSasTokenToCreateValidTableClient() {
Assumptions.assumeFalse(IS_COSMOS_TEST, "Skipping Cosmos test.");
final OffsetDateTime expiryTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
final TableSasPermission permissions = TableSasPermission.parse("a");
final TableSasProtocol protocol = TableSasProtocol.HTTPS_HTTP;
final TableSasSignatureValues sasSignatureValues =
new TableSasSignatureValues(expiryTime, permissions)
.setProtocol(protocol)
.setVersion(TableServiceVersion.V2019_02_02.getVersion());
final String sas = tableClient.generateSas(sasSignatureValues);
final TableClientBuilder tableClientBuilder = new TableClientBuilder()
.httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.endpoint(tableClient.getTableEndpoint())
.sasToken(sas)
.tableName(tableClient.getTableName());
if (interceptorManager.isPlaybackMode()) {
tableClientBuilder.httpClient(playbackClient);
} else {
tableClientBuilder.httpClient(DEFAULT_HTTP_CLIENT);
if (!interceptorManager.isLiveMode()) {
tableClientBuilder.addPolicy(recordPolicy);
}
tableClientBuilder.addPolicy(new RetryPolicy(new ExponentialBackoff(6, Duration.ofMillis(1500),
Duration.ofSeconds(100))));
}
final TableAsyncClient tableAsyncClient = tableClientBuilder.buildAsyncClient();
final String partitionKeyValue = testResourceNamer.randomName("partitionKey", 20);
final String rowKeyValue = testResourceNamer.randomName("rowKey", 20);
final TableEntity entity = new TableEntity(partitionKeyValue, rowKeyValue);
final int expectedStatusCode = 204;
StepVerifier.create(tableAsyncClient.createEntityWithResponse(entity))
.assertNext(response -> assertEquals(expectedStatusCode, response.getStatusCode()))
.expectComplete()
.verify();
}
@Test
public void setAndListAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints.");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id = "testPolicy";
TableSignedIdentifier tableSignedIdentifier = new TableSignedIdentifier(id).setAccessPolicy(tableAccessPolicy);
StepVerifier.create(tableClient.setAccessPoliciesWithResponse(Collections.singletonList(tableSignedIdentifier)))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify();
StepVerifier.create(tableClient.getAccessPolicies())
.assertNext(tableAccessPolicies -> {
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
TableSignedIdentifier signedIdentifier = tableAccessPolicies.getIdentifiers().get(0);
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
assertEquals(id, signedIdentifier.getId());
})
.expectComplete()
.verify();
}
@Test
public void setAndListMultipleAccessPolicies() {
Assumptions.assumeFalse(IS_COSMOS_TEST,
"Setting and listing access policies is not supported on Cosmos endpoints");
OffsetDateTime startTime = OffsetDateTime.of(2021, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
OffsetDateTime expiryTime = OffsetDateTime.of(2022, 12, 12, 0, 0, 0, 0, ZoneOffset.UTC);
String permissions = "r";
TableAccessPolicy tableAccessPolicy = new TableAccessPolicy()
.setStartsOn(startTime)
.setExpiresOn(expiryTime)
.setPermissions(permissions);
String id1 = "testPolicy1";
String id2 = "testPolicy2";
List<TableSignedIdentifier> tableSignedIdentifiers = new ArrayList<>();
tableSignedIdentifiers.add(new TableSignedIdentifier(id1).setAccessPolicy(tableAccessPolicy));
tableSignedIdentifiers.add(new TableSignedIdentifier(id2).setAccessPolicy(tableAccessPolicy));
StepVerifier.create(tableClient.setAccessPoliciesWithResponse(tableSignedIdentifiers))
.assertNext(response -> assertEquals(204, response.getStatusCode()))
.expectComplete()
.verify();
StepVerifier.create(tableClient.getAccessPolicies())
.assertNext(tableAccessPolicies -> {
assertNotNull(tableAccessPolicies);
assertNotNull(tableAccessPolicies.getIdentifiers());
assertEquals(2, tableAccessPolicies.getIdentifiers().size());
assertEquals(id1, tableAccessPolicies.getIdentifiers().get(0).getId());
assertEquals(id2, tableAccessPolicies.getIdentifiers().get(1).getId());
for (TableSignedIdentifier signedIdentifier : tableAccessPolicies.getIdentifiers()) {
assertNotNull(signedIdentifier);
TableAccessPolicy accessPolicy = signedIdentifier.getAccessPolicy();
assertNotNull(accessPolicy);
assertEquals(startTime, accessPolicy.getStartsOn());
assertEquals(expiryTime, accessPolicy.getExpiresOn());
assertEquals(permissions, accessPolicy.getPermissions());
}
})
.expectComplete()
.verify();
}
}
|
Shouldn't the error be updated? Let's use something like INCOMPATIBLE_ARGUMENTS. The actual error message also needs to be updated, maybe to something like ``` arguments of incompatible types: expected argument(s) of types '[int]', found '[int,int,int]' ```
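A minimal sketch of the updated throw site, assuming an INCOMPATIBLE_ARGUMENTS constant is added to both the reason strings and RuntimeErrors (the names here are the proposal, not existing API):
```
// Hypothetical: INCOMPATIBLE_ARGUMENTS would need to be defined alongside the
// existing INCOMPATIBLE_TYPES constant, with the message template suggested above.
throw ErrorCreator.createError(
        getModulePrefixedReason(FUNCTION_LANG_LIB, INCOMPATIBLE_ARGUMENTS),
        BLangExceptionHelper.getErrorDetails(RuntimeErrors.INCOMPATIBLE_ARGUMENTS,
                new BTupleType(paramTypes, restType), new BTupleType(argTypes)));
```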
|
public static Object call(BFunctionPointer<Object, Object> func, Object... args) {
BFunctionType functionType = (BFunctionType) func.getType();
List<Type> paramTypes = new LinkedList<>();
List<Type> argTypes = new LinkedList<>();
List<Object> argsList = new java.util.ArrayList<>(List.of(Scheduler.getStrand()));
if (checkIsValidPositionalArgs(args, argsList, functionType, paramTypes, argTypes) ||
checkIsValidRestArgs(args, argsList, functionType, paramTypes, argTypes)) {
Type restType =
functionType.restType != null ? ((BArrayType) functionType.restType).getElementType() : null;
throw ErrorCreator.createError(
getModulePrefixedReason(FUNCTION_LANG_LIB, INCOMPATIBLE_TYPES),
BLangExceptionHelper.getErrorDetails(RuntimeErrors.INCOMPATIBLE_TYPE,
new BTupleType(paramTypes, restType), new BTupleType(argTypes)));
}
return func.asyncCall(argsList.toArray(), METADATA);
}
|
BLangExceptionHelper.getErrorDetails(RuntimeErrors.INCOMPATIBLE_TYPE,
|
public static Object call(BFunctionPointer<Object, Object> func, Object... args) {
BFunctionType functionType = (BFunctionType) func.getType();
List<Type> paramTypes = new LinkedList<>();
List<Type> argTypes = new LinkedList<>();
List<Object> argsList = new java.util.ArrayList<>(List.of(Scheduler.getStrand()));
if (checkIsValidPositionalArgs(args, argsList, functionType, paramTypes, argTypes) ||
checkIsValidRestArgs(args, argsList, functionType, paramTypes, argTypes)) {
Type restType =
functionType.restType != null ? ((BArrayType) functionType.restType).getElementType() : null;
throw ErrorCreator.createError(
getModulePrefixedReason(FUNCTION_LANG_LIB, INCOMPATIBLE_ARGUMENTS),
BLangExceptionHelper.getErrorDetails(RuntimeErrors.INCOMPATIBLE_ARGUMENTS,
new BTupleType(paramTypes, restType, 0, false), new BTupleType(argTypes)));
}
return func.asyncCall(argsList.toArray(), METADATA);
}
|
class Call {
private static final StrandMetadata METADATA = new StrandMetadata(BALLERINA_BUILTIN_PKG_PREFIX, FUNCTION_LANG_LIB,
"1.0.0", "call");
private static boolean checkIsValidPositionalArgs(Object[] args, List<Object> argsList, BFunctionType functionType,
List<Type> paramTypes, List<Type> argTypes) {
boolean errored = false;
Parameter[] parameters = functionType.parameters;
int numOfParams = parameters.length;
int numOfArgs = args.length;
for (int i = 0; i < numOfParams; i++) {
Parameter parameter = parameters[i];
Type paramType = parameter.type;
paramTypes.add(paramType);
if (i < numOfArgs) {
Object arg = args[i];
Type argType = TypeChecker.getType(arg);
argTypes.add(argType);
if (!TypeChecker.checkIsType(null, arg, argType, paramType)) {
errored = true;
}
argsList.add(arg);
argsList.add(true);
} else if (parameter.isDefault) {
argsList.add(0);
argsList.add(false);
} else {
errored = true;
}
}
return errored;
}
private static boolean checkIsValidRestArgs(Object[] args, List<Object> argsList, BFunctionType functionType,
List<Type> paramTypes, List<Type> argTypes) {
boolean errored = false;
int numOfArgs = args.length;
int numOfRestArgs = Math.max(numOfArgs - functionType.parameters.length, 0);
BArrayType restType = (BArrayType) functionType.restType;
if (restType != null) {
ListInitialValueEntry.ExpressionEntry[] initialValues =
new ListInitialValueEntry.ExpressionEntry[numOfRestArgs];
Type elementType = restType.getElementType();
for (int i = 0; i < numOfRestArgs; i++) {
Object arg = args[numOfArgs - numOfRestArgs + i];
Type argType = TypeChecker.getType(arg);
argTypes.add(argType);
if (!TypeChecker.checkIsType(null, arg, argType, elementType)) {
errored = true;
}
initialValues[i] = new ListInitialValueEntry.ExpressionEntry(arg);
}
if (!errored) {
argsList.add(new ArrayValueImpl(restType, -1L, initialValues));
argsList.add(true);
}
} else if (numOfRestArgs > 0) {
errored = true;
for (int i = numOfArgs - numOfRestArgs; i < numOfArgs; i++) {
argTypes.add(TypeChecker.getType(args[i]));
}
}
return errored;
}
}
|
class Call {
private static final StrandMetadata METADATA = new StrandMetadata(BALLERINA_BUILTIN_PKG_PREFIX, FUNCTION_LANG_LIB,
"1.0.0", "call");
private static boolean checkIsValidPositionalArgs(Object[] args, List<Object> argsList, BFunctionType functionType,
List<Type> paramTypes, List<Type> argTypes) {
boolean errored = false;
Parameter[] parameters = functionType.parameters;
int numOfParams = parameters.length;
int numOfArgs = args.length;
for (int i = 0; i < numOfParams; i++) {
Parameter parameter = parameters[i];
Type paramType = parameter.type;
paramTypes.add(paramType);
if (i < numOfArgs) {
Object arg = args[i];
Type argType = TypeChecker.getType(arg);
argTypes.add(argType);
if (!TypeChecker.checkIsType(null, arg, argType, paramType)) {
errored = true;
}
argsList.add(arg);
argsList.add(true);
} else if (parameter.isDefault) {
argsList.add(0);
argsList.add(false);
} else {
errored = true;
}
}
return errored;
}
private static boolean checkIsValidRestArgs(Object[] args, List<Object> argsList, BFunctionType functionType,
List<Type> paramTypes, List<Type> argTypes) {
boolean errored = false;
int numOfArgs = args.length;
int numOfRestArgs = Math.max(numOfArgs - functionType.parameters.length, 0);
BArrayType restType = (BArrayType) functionType.restType;
if (restType != null) {
ListInitialValueEntry.ExpressionEntry[] initialValues =
new ListInitialValueEntry.ExpressionEntry[numOfRestArgs];
Type elementType = restType.getElementType();
for (int i = 0; i < numOfRestArgs; i++) {
Object arg = args[numOfArgs - numOfRestArgs + i];
Type argType = TypeChecker.getType(arg);
argTypes.add(argType);
if (!TypeChecker.checkIsType(null, arg, argType, elementType)) {
errored = true;
}
initialValues[i] = new ListInitialValueEntry.ExpressionEntry(arg);
}
if (!errored) {
argsList.add(new ArrayValueImpl(restType, -1L, initialValues));
argsList.add(true);
}
} else if (numOfRestArgs > 0) {
errored = true;
for (int i = numOfArgs - numOfRestArgs; i < numOfArgs; i++) {
argTypes.add(TypeChecker.getType(args[i]));
}
}
return errored;
}
}
|
Move the logic to a function, since it's repeated; see the sketch below.
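For instance, the duplicated walk-and-delete could be extracted along these lines (a sketch; it reuses only the imports already present in the test class):
```
// Sketch of the extracted helper: walks the tree bottom-up so children are
// deleted before their parent directories.
private void deleteFiles(Path dirPath) throws IOException {
    Files.walk(dirPath)
            .sorted(Comparator.reverseOrder())
            .forEach(path -> {
                try {
                    Files.delete(path);
                } catch (IOException e) {
                    Assert.fail(e.getMessage(), e);
                }
            });
}
```
The cleanup method then reduces to two calls: `deleteFiles(tempHomeDirectory); deleteFiles(tempProjectDirectory);`.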
|
private void cleanup() throws Exception {
ballerinaClient.stopServer();
Files.walk(tempHomeDirectory)
.sorted(Comparator.reverseOrder())
.forEach(path -> {
try {
Files.delete(path);
} catch (IOException e) {
Assert.fail(e.getMessage(), e);
}
});
Files.walk(tempProjectDirectory)
.sorted(Comparator.reverseOrder())
.forEach(path -> {
try {
Files.delete(path);
} catch (IOException e) {
Assert.fail(e.getMessage(), e);
}
});
}
|
Files.walk(tempHomeDirectory)
|
private void cleanup() throws Exception {
ballerinaClient.stopServer();
deleteFiles(tempHomeDirectory);
deleteFiles(tempProjectDirectory);
}
|
class PackagingPushTestCase extends IntegrationTestCase {
private ServerInstance ballerinaClient;
private String serverZipPath;
private Path tempHomeDirectory;
private Path tempProjectDirectory;
private Path projectPath;
private String packageName = "test";
@BeforeClass()
public void setUp() throws BallerinaTestException, IOException {
tempHomeDirectory = Files.createTempDirectory("bal-test-integration-packaging-home-");
tempProjectDirectory = Files.createTempDirectory("bal-test-integration-packaging-project-");
serverZipPath = System.getProperty(Constant.SYSTEM_PROP_SERVER_ZIP);
createSettingToml();
packageName = packageName + randomPackageName(10);
projectPath = tempProjectDirectory.resolve("myproject");
Files.createDirectory(projectPath);
createBallerinaToml();
Path generatedPackagePath = Paths.get(ProjectDirConstants.DOT_BALLERINA_DIR_NAME,
ProjectDirConstants.DOT_BALLERINA_REPO_DIR_NAME,
"integrationtests",
packageName,
"1.0.0");
Files.createDirectories(projectPath.resolve(generatedPackagePath));
createProjectArchive(generatedPackagePath);
}
/**
* Create Ballerina.toml inside project.
*
* @throws IOException i/o exception when writing to file
*/
private void createBallerinaToml() throws IOException {
Path ballerinaToml = projectPath.resolve("Ballerina.toml");
String ballerinaTomlContent = "[project]\n org-name = \"integrationtests\"\n version = \"1.0.0\"";
Files.write(ballerinaToml, ballerinaTomlContent.getBytes(), StandardOpenOption.CREATE);
}
/**
* Create Settings.toml inside the home repository.
*
* @throws IOException i/o exception when writing to file
*/
private void createSettingToml() throws IOException {
Path tomlFilePath = tempHomeDirectory.resolve("Settings.toml");
String content = "[central]\n accesstoken = \"0f647e67-857d-32e8-a679-bd3c1c3a7eb2\"";
Files.write(tomlFilePath, content.getBytes(), StandardOpenOption.CREATE);
}
/**
* Create the content inside the zipped artifact.
*
* @param generatedPackagePath path of the package to place the artifact
* @throws IOException i/o exception when manipulating files
*/
private void createProjectArchive(Path generatedPackagePath) throws IOException {
Path tempDir = tempProjectDirectory.resolve(packageName);
Path src = tempDir.resolve("src").resolve(packageName);
Files.createDirectories(src);
Path srcFilePath = Paths.get(new File("src" + File.separator + "test" + File.separator + "resources"
+ File.separator + "packaging" + File.separator +
"functions.bal").getAbsolutePath());
Files.copy(srcFilePath, src.resolve("functions.bal"));
Path obj = tempDir.resolve("obj");
Files.createDirectories(obj);
Path objFilePath = Paths.get(new File("src" + File.separator + "test" + File.separator + "resources"
+ File.separator + "packaging" + File.separator +
"my.app.balo").getAbsolutePath());
Files.copy(objFilePath, obj.resolve(packageName + ".balo"));
Path mdDir = tempDir.resolve(packageName);
Files.createDirectories(mdDir);
Path mdFilePath = Paths.get(new File("src" + File.separator + "test" + File.separator + "resources"
+ File.separator + "packaging" + File.separator +
"Package.md").getAbsolutePath());
Files.copy(mdFilePath, mdDir.resolve("Package.md"));
compressFiles(tempDir, new FileOutputStream(projectPath.resolve(generatedPackagePath)
.resolve(packageName + ".zip").toFile()));
}
@Test(description = "Test pushing a package to central")
public void testPush() throws Exception {
ballerinaClient = new ServerInstance(serverZipPath);
String sourceRootPath = projectPath.toString();
String[] clientArgs = {"--sourceroot", sourceRootPath, packageName};
String msg = "integrationtests/" + packageName + ":1.0.0 [project repo -> central]";
LogLeecher clientLeecher = new LogLeecher(msg);
ballerinaClient.addLogLeecher(clientLeecher);
ballerinaClient.runMain(clientArgs, getEnvVariables(), "push");
clientLeecher.waitForText(5000);
String stagingURL = "https:
HttpResponse response = HttpClientRequest.doGet(stagingURL + "integrationtests/" + packageName
+ "/1.0.0");
Assert.assertEquals(response.getResponseCode(), 200, "Response code mismatched");
}
/**
* Get environment variables and add ballerina_home as an env variable pointing to the tmp directory.
*
* @return env directory variable array
*/
private String[] getEnvVariables() {
List<String> variables = new ArrayList<>();
Map<String, String> envVarMap = System.getenv();
envVarMap.forEach((key, value) -> variables.add(key + "=" + value));
variables.add(ProjectDirConstants.HOME_REPO_ENV_KEY + "=" + tempHomeDirectory.toString());
variables.add("BALLERINA_DEV_STAGE_CENTRAL" + "=" + "true");
return variables.toArray(new String[0]);
}
/**
* Generate random package name.
*
* @param count number of characters required
* @return generated name
*/
public String randomPackageName(int count) {
String upperCaseAlpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
String lowerCaseAlpha = "abcdefghijklmnopqrstuvwxyz";
String alpha = upperCaseAlpha + lowerCaseAlpha;
StringBuilder builder = new StringBuilder();
while (count-- != 0) {
int character = (int) (Math.random() * alpha.length());
builder.append(alpha.charAt(character));
}
return builder.toString();
}
@AfterClass
/**
* Add file inside the src directory to the ZipOutputStream.
*
* @param zos ZipOutputStream
* @param filePath file path of each file inside the directory
     * @throws IOException exception if an error occurs when compressing
*/
private static void addEntry(ZipOutputStream zos, Path filePath, String fileStr) throws IOException {
ZipEntry ze = new ZipEntry(fileStr);
zos.putNextEntry(ze);
Files.copy(filePath, zos);
zos.closeEntry();
}
/**
* Compresses files.
*
* @param dir file or directory to compress
     * @param outputStream output stream to write the compressed content to
     * @throws IOException exception if an error occurs when compressing
*/
private static void compressFiles(Path dir, OutputStream outputStream) throws IOException {
ZipOutputStream zos = new ZipOutputStream(outputStream);
if (Files.isRegularFile(dir)) {
Path fileName = dir.getFileName();
if (fileName != null) {
addEntry(zos, dir, fileName.toString());
} else {
throw new RuntimeException("Error occurred when compressing");
}
} else {
Stream<Path> list = Files.walk(dir);
list.forEach(p -> {
StringJoiner joiner = new StringJoiner("/");
for (Path path : dir.relativize(p)) {
joiner.add(path.toString());
}
if (Files.isRegularFile(p)) {
try {
addEntry(zos, p, joiner.toString());
} catch (IOException ignore) {
}
}
});
}
zos.close();
}
}
|
class PackagingPushTestCase extends IntegrationTestCase {
private ServerInstance ballerinaClient;
private String serverZipPath;
private Path tempHomeDirectory;
private Path tempProjectDirectory;
private Path projectPath;
private String packageName = "test";
@BeforeClass()
public void setUp() throws BallerinaTestException, IOException {
tempHomeDirectory = Files.createTempDirectory("bal-test-integration-packaging-home-");
tempProjectDirectory = Files.createTempDirectory("bal-test-integration-packaging-project-");
serverZipPath = System.getProperty(Constant.SYSTEM_PROP_SERVER_ZIP);
createSettingToml();
packageName = packageName + randomPackageName(10);
projectPath = tempProjectDirectory.resolve("myproject");
Files.createDirectory(projectPath);
createBallerinaToml();
Path generatedPackagePath = Paths.get(ProjectDirConstants.DOT_BALLERINA_DIR_NAME,
ProjectDirConstants.DOT_BALLERINA_REPO_DIR_NAME,
"integrationtests",
packageName,
"1.0.0");
Files.createDirectories(projectPath.resolve(generatedPackagePath));
createProjectArchive(generatedPackagePath);
}
/**
* Create Ballerina.toml inside project.
*
* @throws IOException i/o exception when writing to file
*/
private void createBallerinaToml() throws IOException {
Path ballerinaToml = projectPath.resolve("Ballerina.toml");
String ballerinaTomlContent = "[project]\n org-name = \"integrationtests\"\n version = \"1.0.0\"";
Files.write(ballerinaToml, ballerinaTomlContent.getBytes(), StandardOpenOption.CREATE);
}
/**
* Create Settings.toml inside the home repository.
*
* @throws IOException i/o exception when writing to file
*/
private void createSettingToml() throws IOException {
Path tomlFilePath = tempHomeDirectory.resolve("Settings.toml");
String content = "[central]\n accesstoken = \"0f647e67-857d-32e8-a679-bd3c1c3a7eb2\"";
Files.write(tomlFilePath, content.getBytes(), StandardOpenOption.CREATE);
}
/**
* Create the content inside the zipped artifact.
*
* @param generatedPackagePath path of the package to place the artifact
* @throws IOException i/o exception when manipulating files
*/
private void createProjectArchive(Path generatedPackagePath) throws IOException {
Path tempDir = tempProjectDirectory.resolve(packageName);
Path src = tempDir.resolve("src").resolve(packageName);
Files.createDirectories(src);
Path srcFilePath = Paths.get(new File("src" + File.separator + "test" + File.separator + "resources"
+ File.separator + "packaging" + File.separator +
"functions.bal").getAbsolutePath());
Files.copy(srcFilePath, src.resolve("functions.bal"));
Path obj = tempDir.resolve("obj");
Files.createDirectories(obj);
Path objFilePath = Paths.get(new File("src" + File.separator + "test" + File.separator + "resources"
+ File.separator + "packaging" + File.separator +
"my.app.balo").getAbsolutePath());
Files.copy(objFilePath, obj.resolve(packageName + ".balo"));
Path mdDir = tempDir.resolve(packageName);
Files.createDirectories(mdDir);
Path mdFilePath = Paths.get(new File("src" + File.separator + "test" + File.separator + "resources"
+ File.separator + "packaging" + File.separator +
"Package.md").getAbsolutePath());
Files.copy(mdFilePath, mdDir.resolve("Package.md"));
compressFiles(tempDir, projectPath.resolve(generatedPackagePath).resolve(packageName + ".zip"));
}
@Test(description = "Test pushing a package to central")
public void testPush() throws Exception {
ballerinaClient = new ServerInstance(serverZipPath);
String sourceRootPath = projectPath.toString();
String[] clientArgs = {"--sourceroot", sourceRootPath, packageName};
String msg = "integrationtests/" + packageName + ":1.0.0 [project repo -> central]";
LogLeecher clientLeecher = new LogLeecher(msg);
ballerinaClient.addLogLeecher(clientLeecher);
ballerinaClient.runMain(clientArgs, getEnvVariables(), "push");
clientLeecher.waitForText(5000);
String stagingURL = "https:
HttpResponse response = HttpClientRequest.doGet(stagingURL + "integrationtests/" + packageName
+ "/1.0.0");
Assert.assertEquals(response.getResponseCode(), 200, "Response code mismatched");
}
/**
* Get environment variables and add ballerina_home as an env variable pointing to the tmp directory.
*
* @return env directory variable array
*/
private String[] getEnvVariables() {
List<String> variables = new ArrayList<>();
Map<String, String> envVarMap = System.getenv();
envVarMap.forEach((key, value) -> variables.add(key + "=" + value));
variables.add(ProjectDirConstants.HOME_REPO_ENV_KEY + "=" + tempHomeDirectory.toString());
variables.add("BALLERINA_DEV_STAGE_CENTRAL" + "=" + "true");
return variables.toArray(new String[variables.size()]);
}
/**
* Generate random package name.
*
* @param count number of characters required
* @return generated name
*/
private String randomPackageName(int count) {
String upperCaseAlpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
String lowerCaseAlpha = "abcdefghijklmnopqrstuvwxyz";
String alpha = upperCaseAlpha + lowerCaseAlpha;
StringBuilder builder = new StringBuilder();
while (count-- != 0) {
int character = (int) (Math.random() * alpha.length());
builder.append(alpha.charAt(character));
}
return builder.toString();
}
@AfterClass
/**
* Delete files inside directories.
*
* @param dirPath directory path
     * @throws IOException if an issue occurs while deleting
*/
private void deleteFiles(Path dirPath) throws IOException {
Files.walk(dirPath)
.sorted(Comparator.reverseOrder())
.forEach(path -> {
try {
Files.delete(path);
} catch (IOException e) {
Assert.fail(e.getMessage(), e);
}
});
}
/**
* Compress files.
*
* @param sourceDirPath source directory path to be compressed
* @param zipFilePath destination zip file path
     * @throws IOException if an I/O issue occurs
*/
private static void compressFiles(Path sourceDirPath, Path zipFilePath) throws IOException {
Files.deleteIfExists(zipFilePath);
Map<String, String> env = new HashMap<>();
env.put("create", "true");
URI uri = URI.create("jar:" + zipFilePath.toUri());
try (FileSystem zipfs = FileSystems.newFileSystem(uri, env)) {
Files.walk(sourceDirPath)
.filter(d -> !d.equals(sourceDirPath))
.forEach(fileToZip -> {
Path pathInZipFile = zipfs.getPath("/");
for (Path part : sourceDirPath.relativize(fileToZip)) {
pathInZipFile = pathInZipFile.resolve(part.toString());
}
try {
Files.copy(fileToZip, pathInZipFile);
} catch (IOException e) {
Assert.fail(e.getMessage(), e);
}
});
} catch (UncheckedIOException e) {
Assert.fail(e.getMessage(), e);
}
}
}
|
`dataRecords` might be empty here; in that case the behavior is the same as before.
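To illustrate: at the end of the loop `dataRecords` may hold zero data rows, but the `FinishedRecord` appended just before the final push guarantees the pushed list is never empty (sketch of the relevant tail of the method):
```
// dataRecords may be empty here if the last batch was already pushed inside
// the loop; appending the FinishedRecord keeps the final push non-empty.
dataRecords.add(new FinishedRecord(new FinishedPosition()));
channel.pushRecords(dataRecords);
```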
|
private void dump(final PipelineTableMetaData tableMetaData, final Connection connection) throws SQLException {
int batchSize = dumperConfig.getBatchSize();
DatabaseType databaseType = dumperConfig.getDataSourceConfig().getDatabaseType();
if (null != dumperConfig.getTransactionIsolation()) {
connection.setTransactionIsolation(dumperConfig.getTransactionIsolation());
}
try (PreparedStatement preparedStatement = JDBCStreamQueryUtils.generateStreamQueryPreparedStatement(databaseType, connection, buildInventoryDumpSQL())) {
dumpStatement.set(preparedStatement);
if (!(databaseType instanceof MySQLDatabaseType)) {
preparedStatement.setFetchSize(batchSize);
}
setParameters(preparedStatement);
try (ResultSet resultSet = preparedStatement.executeQuery()) {
int rowCount = 0;
JobRateLimitAlgorithm rateLimitAlgorithm = dumperConfig.getRateLimitAlgorithm();
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
List<Record> dataRecords = new LinkedList<>();
while (resultSet.next()) {
if (dataRecords.size() >= batchSize) {
channel.pushRecords(dataRecords);
dataRecords = new LinkedList<>();
}
dataRecords.add(loadDataRecord(resultSet, resultSetMetaData, tableMetaData));
++rowCount;
if (!isRunning()) {
log.info("Broke because of inventory dump is not running.");
break;
}
if (null != rateLimitAlgorithm && 0 == rowCount % batchSize) {
rateLimitAlgorithm.intercept(JobOperationType.SELECT, 1);
}
}
dataRecords.add(new FinishedRecord(new FinishedPosition()));
channel.pushRecords(dataRecords);
dumpStatement.set(null);
log.info("Inventory dump done, rowCount={}", rowCount);
}
}
}
|
channel.pushRecords(dataRecords);
|
private void dump(final PipelineTableMetaData tableMetaData, final Connection connection) throws SQLException {
int batchSize = dumperConfig.getBatchSize();
DatabaseType databaseType = dumperConfig.getDataSourceConfig().getDatabaseType();
if (null != dumperConfig.getTransactionIsolation()) {
connection.setTransactionIsolation(dumperConfig.getTransactionIsolation());
}
try (PreparedStatement preparedStatement = JDBCStreamQueryUtils.generateStreamQueryPreparedStatement(databaseType, connection, buildInventoryDumpSQL())) {
dumpStatement.set(preparedStatement);
if (!(databaseType instanceof MySQLDatabaseType)) {
preparedStatement.setFetchSize(batchSize);
}
setParameters(preparedStatement);
try (ResultSet resultSet = preparedStatement.executeQuery()) {
int rowCount = 0;
JobRateLimitAlgorithm rateLimitAlgorithm = dumperConfig.getRateLimitAlgorithm();
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
List<Record> dataRecords = new LinkedList<>();
while (resultSet.next()) {
if (dataRecords.size() >= batchSize) {
channel.pushRecords(dataRecords);
dataRecords = new LinkedList<>();
}
dataRecords.add(loadDataRecord(resultSet, resultSetMetaData, tableMetaData));
++rowCount;
if (!isRunning()) {
log.info("Broke because of inventory dump is not running.");
break;
}
if (null != rateLimitAlgorithm && 0 == rowCount % batchSize) {
rateLimitAlgorithm.intercept(JobOperationType.SELECT, 1);
}
}
dataRecords.add(new FinishedRecord(new FinishedPosition()));
channel.pushRecords(dataRecords);
dumpStatement.set(null);
log.info("Inventory dump done, rowCount={}", rowCount);
}
}
}
|
class InventoryDumper extends AbstractLifecycleExecutor implements Dumper {
@Getter(AccessLevel.PROTECTED)
private final InventoryDumperConfiguration dumperConfig;
private final PipelineChannel channel;
private final DataSource dataSource;
private final PipelineSQLBuilder sqlBuilder;
private final ColumnValueReader columnValueReader;
private final PipelineTableMetaDataLoader metaDataLoader;
private final AtomicReference<Statement> dumpStatement = new AtomicReference<>();
public InventoryDumper(final InventoryDumperConfiguration dumperConfig, final PipelineChannel channel, final DataSource dataSource, final PipelineTableMetaDataLoader metaDataLoader) {
this.dumperConfig = dumperConfig;
this.channel = channel;
this.dataSource = dataSource;
String databaseType = dumperConfig.getDataSourceConfig().getDatabaseType().getType();
sqlBuilder = PipelineTypedSPILoader.getDatabaseTypedService(PipelineSQLBuilder.class, databaseType);
columnValueReader = PipelineTypedSPILoader.getDatabaseTypedService(ColumnValueReader.class, databaseType);
this.metaDataLoader = metaDataLoader;
}
@Override
protected void runBlocking() {
IngestPosition position = dumperConfig.getPosition();
if (position instanceof FinishedPosition) {
log.info("Ignored because of already finished.");
return;
}
PipelineTableMetaData tableMetaData = metaDataLoader.getTableMetaData(dumperConfig.getSchemaName(new LogicTableName(dumperConfig.getLogicTableName())), dumperConfig.getActualTableName());
try (Connection connection = dataSource.getConnection()) {
dump(tableMetaData, connection);
} catch (final SQLException ex) {
log.error("Inventory dump, ex caught, msg={}.", ex.getMessage());
throw new IngestException("Inventory dump failed on " + dumperConfig.getActualTableName(), ex);
}
}
private String buildInventoryDumpSQL() {
if (!Strings.isNullOrEmpty(dumperConfig.getQuerySQL())) {
return dumperConfig.getQuerySQL();
}
LogicTableName logicTableName = new LogicTableName(dumperConfig.getLogicTableName());
String schemaName = dumperConfig.getSchemaName(logicTableName);
if (!dumperConfig.hasUniqueKey()) {
return sqlBuilder.buildNoUniqueKeyInventoryDumpSQL(schemaName, dumperConfig.getActualTableName());
}
PrimaryKeyPosition<?> position = (PrimaryKeyPosition<?>) dumperConfig.getPosition();
PipelineColumnMetaData firstColumn = dumperConfig.getUniqueKeyColumns().get(0);
List<String> columnNames = dumperConfig.getColumnNameList(logicTableName).orElse(Collections.singletonList("*"));
if (PipelineJdbcUtils.isIntegerColumn(firstColumn.getDataType()) || PipelineJdbcUtils.isStringColumn(firstColumn.getDataType())) {
if (null != position.getBeginValue() && null != position.getEndValue()) {
return sqlBuilder.buildDivisibleInventoryDumpSQL(schemaName, dumperConfig.getActualTableName(), columnNames, firstColumn.getName());
}
if (null != position.getBeginValue() && null == position.getEndValue()) {
return sqlBuilder.buildDivisibleInventoryDumpSQLNoEnd(schemaName, dumperConfig.getActualTableName(), columnNames, firstColumn.getName());
}
}
return sqlBuilder.buildIndivisibleInventoryDumpSQL(schemaName, dumperConfig.getActualTableName(), columnNames, firstColumn.getName());
}
private void setParameters(final PreparedStatement preparedStatement) throws SQLException {
if (!dumperConfig.hasUniqueKey()) {
return;
}
PipelineColumnMetaData firstColumn = dumperConfig.getUniqueKeyColumns().get(0);
PrimaryKeyPosition<?> position = (PrimaryKeyPosition<?>) dumperConfig.getPosition();
if (PipelineJdbcUtils.isIntegerColumn(firstColumn.getDataType()) && null != position.getBeginValue() && null != position.getEndValue()) {
preparedStatement.setObject(1, position.getBeginValue());
preparedStatement.setObject(2, position.getEndValue());
return;
}
if (PipelineJdbcUtils.isStringColumn(firstColumn.getDataType())) {
if (null != position.getBeginValue()) {
preparedStatement.setObject(1, position.getBeginValue());
}
if (null != position.getEndValue()) {
preparedStatement.setObject(2, position.getEndValue());
}
}
}
private DataRecord loadDataRecord(final ResultSet resultSet, final ResultSetMetaData resultSetMetaData, final PipelineTableMetaData tableMetaData) throws SQLException {
int columnCount = resultSetMetaData.getColumnCount();
DataRecord result = new DataRecord(newPosition(resultSet), columnCount);
result.setType(IngestDataChangeType.INSERT);
result.setTableName(dumperConfig.getLogicTableName());
List<String> insertColumnNames = Optional.ofNullable(dumperConfig.getInsertColumnNames()).orElse(Collections.emptyList());
ShardingSpherePreconditions.checkState(insertColumnNames.isEmpty() || insertColumnNames.size() == resultSetMetaData.getColumnCount(),
() -> new PipelineInvalidParameterException("Insert column names count not equals ResultSet column count"));
for (int i = 1; i <= columnCount; i++) {
String columnName = insertColumnNames.isEmpty() ? resultSetMetaData.getColumnName(i) : insertColumnNames.get(i - 1);
ShardingSpherePreconditions.checkNotNull(tableMetaData.getColumnMetaData(columnName), () -> new PipelineInvalidParameterException(String.format("Column name is %s", columnName)));
boolean isUniqueKey = tableMetaData.getColumnMetaData(columnName).isUniqueKey();
result.addColumn(new Column(columnName, columnValueReader.readValue(resultSet, resultSetMetaData, i), true, isUniqueKey));
}
return result;
}
private IngestPosition newPosition(final ResultSet resultSet) throws SQLException {
return dumperConfig.hasUniqueKey()
? PrimaryKeyPositionFactory.newInstance(resultSet.getObject(dumperConfig.getUniqueKeyColumns().get(0).getName()), ((PrimaryKeyPosition<?>) dumperConfig.getPosition()).getEndValue())
: new PlaceholderPosition();
}
@Override
protected void doStop() throws SQLException {
cancelStatement(dumpStatement.get());
}
}
|
class InventoryDumper extends AbstractLifecycleExecutor implements Dumper {
@Getter(AccessLevel.PROTECTED)
private final InventoryDumperConfiguration dumperConfig;
private final PipelineChannel channel;
private final DataSource dataSource;
private final PipelineSQLBuilder sqlBuilder;
private final ColumnValueReader columnValueReader;
private final PipelineTableMetaDataLoader metaDataLoader;
private final AtomicReference<Statement> dumpStatement = new AtomicReference<>();
public InventoryDumper(final InventoryDumperConfiguration dumperConfig, final PipelineChannel channel, final DataSource dataSource, final PipelineTableMetaDataLoader metaDataLoader) {
this.dumperConfig = dumperConfig;
this.channel = channel;
this.dataSource = dataSource;
String databaseType = dumperConfig.getDataSourceConfig().getDatabaseType().getType();
sqlBuilder = PipelineTypedSPILoader.getDatabaseTypedService(PipelineSQLBuilder.class, databaseType);
columnValueReader = PipelineTypedSPILoader.getDatabaseTypedService(ColumnValueReader.class, databaseType);
this.metaDataLoader = metaDataLoader;
}
@Override
protected void runBlocking() {
IngestPosition position = dumperConfig.getPosition();
if (position instanceof FinishedPosition) {
log.info("Ignored because of already finished.");
return;
}
PipelineTableMetaData tableMetaData = metaDataLoader.getTableMetaData(dumperConfig.getSchemaName(new LogicTableName(dumperConfig.getLogicTableName())), dumperConfig.getActualTableName());
try (Connection connection = dataSource.getConnection()) {
dump(tableMetaData, connection);
} catch (final SQLException ex) {
log.error("Inventory dump, ex caught, msg={}.", ex.getMessage());
throw new IngestException("Inventory dump failed on " + dumperConfig.getActualTableName(), ex);
}
}
private String buildInventoryDumpSQL() {
if (!Strings.isNullOrEmpty(dumperConfig.getQuerySQL())) {
return dumperConfig.getQuerySQL();
}
LogicTableName logicTableName = new LogicTableName(dumperConfig.getLogicTableName());
String schemaName = dumperConfig.getSchemaName(logicTableName);
if (!dumperConfig.hasUniqueKey()) {
return sqlBuilder.buildNoUniqueKeyInventoryDumpSQL(schemaName, dumperConfig.getActualTableName());
}
PrimaryKeyPosition<?> position = (PrimaryKeyPosition<?>) dumperConfig.getPosition();
PipelineColumnMetaData firstColumn = dumperConfig.getUniqueKeyColumns().get(0);
List<String> columnNames = dumperConfig.getColumnNameList(logicTableName).orElse(Collections.singletonList("*"));
if (PipelineJdbcUtils.isIntegerColumn(firstColumn.getDataType()) || PipelineJdbcUtils.isStringColumn(firstColumn.getDataType())) {
if (null != position.getBeginValue() && null != position.getEndValue()) {
return sqlBuilder.buildDivisibleInventoryDumpSQL(schemaName, dumperConfig.getActualTableName(), columnNames, firstColumn.getName());
}
if (null != position.getBeginValue() && null == position.getEndValue()) {
return sqlBuilder.buildDivisibleInventoryDumpSQLNoEnd(schemaName, dumperConfig.getActualTableName(), columnNames, firstColumn.getName());
}
}
return sqlBuilder.buildIndivisibleInventoryDumpSQL(schemaName, dumperConfig.getActualTableName(), columnNames, firstColumn.getName());
}
private void setParameters(final PreparedStatement preparedStatement) throws SQLException {
if (!dumperConfig.hasUniqueKey()) {
return;
}
PipelineColumnMetaData firstColumn = dumperConfig.getUniqueKeyColumns().get(0);
PrimaryKeyPosition<?> position = (PrimaryKeyPosition<?>) dumperConfig.getPosition();
if (PipelineJdbcUtils.isIntegerColumn(firstColumn.getDataType()) && null != position.getBeginValue() && null != position.getEndValue()) {
preparedStatement.setObject(1, position.getBeginValue());
preparedStatement.setObject(2, position.getEndValue());
return;
}
if (PipelineJdbcUtils.isStringColumn(firstColumn.getDataType())) {
if (null != position.getBeginValue()) {
preparedStatement.setObject(1, position.getBeginValue());
}
if (null != position.getEndValue()) {
preparedStatement.setObject(2, position.getEndValue());
}
}
}
private DataRecord loadDataRecord(final ResultSet resultSet, final ResultSetMetaData resultSetMetaData, final PipelineTableMetaData tableMetaData) throws SQLException {
int columnCount = resultSetMetaData.getColumnCount();
DataRecord result = new DataRecord(newPosition(resultSet), columnCount);
result.setType(IngestDataChangeType.INSERT);
result.setTableName(dumperConfig.getLogicTableName());
List<String> insertColumnNames = Optional.ofNullable(dumperConfig.getInsertColumnNames()).orElse(Collections.emptyList());
ShardingSpherePreconditions.checkState(insertColumnNames.isEmpty() || insertColumnNames.size() == resultSetMetaData.getColumnCount(),
() -> new PipelineInvalidParameterException("Insert column names count not equals ResultSet column count"));
for (int i = 1; i <= columnCount; i++) {
String columnName = insertColumnNames.isEmpty() ? resultSetMetaData.getColumnName(i) : insertColumnNames.get(i - 1);
ShardingSpherePreconditions.checkNotNull(tableMetaData.getColumnMetaData(columnName), () -> new PipelineInvalidParameterException(String.format("Column name is %s", columnName)));
boolean isUniqueKey = tableMetaData.getColumnMetaData(columnName).isUniqueKey();
result.addColumn(new Column(columnName, columnValueReader.readValue(resultSet, resultSetMetaData, i), true, isUniqueKey));
}
return result;
}
private IngestPosition newPosition(final ResultSet resultSet) throws SQLException {
return dumperConfig.hasUniqueKey()
? PrimaryKeyPositionFactory.newInstance(resultSet.getObject(dumperConfig.getUniqueKeyColumns().get(0).getName()), ((PrimaryKeyPosition<?>) dumperConfig.getPosition()).getEndValue())
: new PlaceholderPosition();
}
@Override
protected void doStop() throws SQLException {
cancelStatement(dumpStatement.get());
}
}
|
The sequence column will keep the original value; the sketch below shows why.
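This falls out of how the select list is built: any column without a matching SET expression, a sequence column included, is selected via a plain SlotRef on the target table, so its original value is carried into the rewritten INSERT (sketch of the relevant loop from analyzeSetExprs):
```
// Columns default to their current value (SlotRef); only columns named in a
// SET expression are replaced, so a sequence column keeps its original value.
for (Column column : targetTable.getColumns()) {
    Expr expr = new SlotRef(tableName, column.getName());
    for (BinaryPredicate setExpr : setExprs) {
        if (((SlotRef) setExpr.getChild(0)).getColumn().equals(column)) {
            expr = setExpr.getChild(1);
        }
    }
    selectListItems.add(new SelectListItem(expr, null));
    cols.add(column.getName());
}
```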
|
private void constructInsertStmt() {
FromClause fromUsedInInsert;
TableRef tableRef = new TableRef(tableName, null);
if (fromClause == null) {
fromUsedInInsert = new FromClause(Lists.newArrayList(tableRef));
} else {
fromUsedInInsert = fromClause.clone();
fromUsedInInsert.getTableRefs().add(0, tableRef);
}
SelectStmt selectStmt = new SelectStmt(
new SelectList(selectListItems, false),
fromUsedInInsert,
whereExpr,
null,
null,
null,
LimitElement.NO_LIMIT
);
insertStmt = new InsertStmt(
new InsertTarget(tableName, null),
null,
cols,
new InsertSource(selectStmt),
null);
}
|
private void constructInsertStmt() {
FromClause fromUsedInInsert;
TableRef tableRef = new TableRef(tableName, null);
if (fromClause == null) {
fromUsedInInsert = new FromClause(Lists.newArrayList(tableRef));
} else {
fromUsedInInsert = fromClause.clone();
fromUsedInInsert.getTableRefs().add(0, tableRef);
}
SelectStmt selectStmt = new SelectStmt(
new SelectList(selectListItems, false),
fromUsedInInsert,
whereExpr,
null,
null,
null,
LimitElement.NO_LIMIT
);
insertStmt = new InsertStmt(
new InsertTarget(tableName, null),
null,
cols,
new InsertSource(selectStmt),
null);
}
|
class UpdateStmt extends DdlStmt {
private final TableName tableName;
private final List<BinaryPredicate> setExprs;
private final Expr whereExpr;
private final FromClause fromClause;
private InsertStmt insertStmt;
private Table targetTable;
List<SelectListItem> selectListItems = Lists.newArrayList();
List<String> cols = Lists.newArrayList();
public UpdateStmt(TableName tableName, List<BinaryPredicate> setExprs, FromClause fromClause, Expr whereExpr) {
this.tableName = tableName;
this.setExprs = setExprs;
this.fromClause = fromClause;
this.whereExpr = whereExpr;
}
public InsertStmt getInsertStmt() {
return insertStmt;
}
@Override
public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);
analyzeTargetTable(analyzer);
analyzeSetExprs(analyzer);
constructInsertStmt();
}
private void analyzeTargetTable(Analyzer analyzer) throws AnalysisException {
tableName.analyze(analyzer);
Util.prohibitExternalCatalog(tableName.getCtl(), this.getClass().getSimpleName());
if (!Env.getCurrentEnv().getAccessManager()
.checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.LOAD)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "LOAD");
}
String dbName = tableName.getDb();
String targetTableName = tableName.getTbl();
Preconditions.checkNotNull(dbName);
Preconditions.checkNotNull(targetTableName);
Database database = Env.getCurrentInternalCatalog().getDbOrAnalysisException(dbName);
targetTable = database.getTableOrAnalysisException(tableName.getTbl());
if (targetTable.getType() != Table.TableType.OLAP
|| ((OlapTable) targetTable).getKeysType() != KeysType.UNIQUE_KEYS) {
throw new AnalysisException("Only unique table could be updated.");
}
targetTable.readLock();
try {
analyzer.registerOlapTable(targetTable, tableName, null);
} finally {
targetTable.readUnlock();
}
}
private void analyzeSetExprs(Analyzer analyzer) throws AnalysisException {
Set<String> columnMappingNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
for (BinaryPredicate predicate : setExprs) {
if (predicate.getOp() != BinaryPredicate.Operator.EQ) {
throw new AnalysisException("Set function expr only support eq binary predicate. "
+ "The predicate operator error, op: " + predicate.getOp());
}
Expr lhs = predicate.getChild(0);
if (!(lhs instanceof SlotRef)) {
throw new AnalysisException("Set function expr only support eq binary predicate "
+ "which child(0) must be a column name. "
+ "The child(0) expr error. expr: " + lhs.toSql());
}
String column = ((SlotRef) lhs).getColumnName();
if (!columnMappingNames.add(column)) {
throw new AnalysisException("Duplicate column setting: " + column);
}
}
for (BinaryPredicate setExpr : setExprs) {
Expr lhs = setExpr.getChild(0);
if (!(lhs instanceof SlotRef)) {
throw new AnalysisException("The left side of the set expr must be the column name");
}
lhs.analyze(analyzer);
if (((SlotRef) lhs).getColumn().isKey()) {
throw new AnalysisException("Only value columns of unique table could be updated");
}
}
for (Column column : targetTable.getColumns()) {
Expr expr = new SlotRef(tableName, column.getName());
for (BinaryPredicate setExpr : setExprs) {
Expr lhs = setExpr.getChild(0);
if (((SlotRef) lhs).getColumn().equals(column)) {
expr = setExpr.getChild(1);
}
}
selectListItems.add(new SelectListItem(expr, null));
cols.add(column.getName());
}
}
@Override
public String toSql() {
StringBuilder sb = new StringBuilder("UPDATE ");
sb.append(tableName.toSql()).append("\n");
sb.append(" ").append("SET ");
for (Expr setExpr : setExprs) {
sb.append(setExpr.toSql()).append(", ");
}
if (fromClause != null) {
sb.append("\n").append(fromClause.toSql());
}
sb.append("\n");
if (whereExpr != null) {
sb.append(" ").append("WHERE ").append(whereExpr.toSql());
}
return sb.toString();
}
}
|
class UpdateStmt extends DdlStmt {
private final TableName tableName;
private final List<BinaryPredicate> setExprs;
private final Expr whereExpr;
private final FromClause fromClause;
private InsertStmt insertStmt;
private Table targetTable;
List<SelectListItem> selectListItems = Lists.newArrayList();
List<String> cols = Lists.newArrayList();
public UpdateStmt(TableName tableName, List<BinaryPredicate> setExprs, FromClause fromClause, Expr whereExpr) {
this.tableName = tableName;
this.setExprs = setExprs;
this.fromClause = fromClause;
this.whereExpr = whereExpr;
}
public InsertStmt getInsertStmt() {
return insertStmt;
}
@Override
public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);
if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable().isInDebugMode()) {
throw new AnalysisException("Update is forbidden since current session is in debug mode."
+ " Please check the following session variables: "
+ String.join(", ", SessionVariable.DEBUG_VARIABLES));
}
analyzeTargetTable(analyzer);
analyzeSetExprs(analyzer);
constructInsertStmt();
}
private void analyzeTargetTable(Analyzer analyzer) throws AnalysisException {
tableName.analyze(analyzer);
Util.prohibitExternalCatalog(tableName.getCtl(), this.getClass().getSimpleName());
if (!Env.getCurrentEnv().getAccessManager()
.checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.LOAD)) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "LOAD");
}
String dbName = tableName.getDb();
String targetTableName = tableName.getTbl();
Preconditions.checkNotNull(dbName);
Preconditions.checkNotNull(targetTableName);
Database database = Env.getCurrentInternalCatalog().getDbOrAnalysisException(dbName);
targetTable = database.getTableOrAnalysisException(tableName.getTbl());
if (targetTable.getType() != Table.TableType.OLAP
|| ((OlapTable) targetTable).getKeysType() != KeysType.UNIQUE_KEYS) {
throw new AnalysisException("Only unique table could be updated.");
}
targetTable.readLock();
try {
analyzer.registerOlapTable(targetTable, tableName, null);
} finally {
targetTable.readUnlock();
}
}
private void analyzeSetExprs(Analyzer analyzer) throws AnalysisException {
Set<String> columnMappingNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
for (BinaryPredicate predicate : setExprs) {
if (predicate.getOp() != BinaryPredicate.Operator.EQ) {
throw new AnalysisException("Set function expr only support eq binary predicate. "
+ "The predicate operator error, op: " + predicate.getOp());
}
Expr lhs = predicate.getChild(0);
if (!(lhs instanceof SlotRef)) {
throw new AnalysisException("Set function expr only support eq binary predicate "
+ "which child(0) must be a column name. "
+ "The child(0) expr error. expr: " + lhs.toSql());
}
String column = ((SlotRef) lhs).getColumnName();
if (!columnMappingNames.add(column)) {
throw new AnalysisException("Duplicate column setting: " + column);
}
}
for (BinaryPredicate setExpr : setExprs) {
Expr lhs = setExpr.getChild(0);
if (!(lhs instanceof SlotRef)) {
throw new AnalysisException("The left side of the set expr must be the column name");
}
lhs.analyze(analyzer);
if (((SlotRef) lhs).getColumn().isKey()) {
throw new AnalysisException("Only value columns of unique table could be updated");
}
}
for (Column column : targetTable.getColumns()) {
Expr expr = new SlotRef(tableName, column.getName());
for (BinaryPredicate setExpr : setExprs) {
Expr lhs = setExpr.getChild(0);
if (((SlotRef) lhs).getColumn().equals(column)) {
expr = setExpr.getChild(1);
}
}
selectListItems.add(new SelectListItem(expr, null));
cols.add(column.getName());
}
}
@Override
public String toSql() {
StringBuilder sb = new StringBuilder("UPDATE ");
sb.append(tableName.toSql()).append("\n");
sb.append(" ").append("SET ");
for (Expr setExpr : setExprs) {
sb.append(setExpr.toSql()).append(", ");
}
if (fromClause != null) {
sb.append("\n").append(fromClause.toSql());
}
sb.append("\n");
if (whereExpr != null) {
sb.append(" ").append("WHERE ").append(whereExpr.toSql());
}
return sb.toString();
}
}
|
|
You should add it to the commit message and the documentation.
|
public void testParser() {
String sql = "alter ROUTINE LOAD for testdb.routine_name\n"
+ "WHERE k1 > 1 and k2 like \"%starrocks%\",\n"
+ "COLUMNS(k1, k2, k4 = k1 + k2),\n"
+ "COLUMNS TERMINATED BY \"\\t\",\n"
+ "PARTITION(p1,p2) \n"
+ "PROPERTIES\n"
+ "(\n"
+ "\"max_batch_rows\"=\"200000\",\n"
+ "\"max_error_number\"=\"1\",\n"
+ "\"desired_concurrent_number\"=\"3\",\n"
+ "\"max_batch_interval\" = \"21\",\n"
+ "\"strict_mode\" = \"false\",\n"
+ "\"timezone\" = \"Africa/Abidjan\"\n"
+ ")\n"
+ "FROM KAFKA\n"
+ "(\n"
+ "\"kafka_partitions\" = \"0, 1, 2\",\n"
+ "\"kafka_offsets\" = \"100, 200, 100\",\n"
+ "\"property.group.id\" = \"group1\",\n"
+ "\"confluent.schema.registry.url\" = \"https:
+ ");";
List<StatementBase> stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
AlterRoutineLoadStmt stmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.assertEquals(6, stmt.getAnalyzedJobProperties().size());
Assert.assertTrue(
stmt.getAnalyzedJobProperties().containsKey(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY));
Assert.assertTrue(
stmt.getAnalyzedJobProperties().containsKey(CreateRoutineLoadStmt.MAX_BATCH_ROWS_PROPERTY));
Assert.assertTrue(stmt.hasDataSourceProperty());
Assert.assertEquals(1, stmt.getDataSourceProperties().getCustomKafkaProperties().size());
Assert.assertTrue(stmt.getDataSourceProperties().getCustomKafkaProperties().containsKey("group.id"));
Assert.assertEquals(3, stmt.getDataSourceProperties().getKafkaPartitionOffsets().size());
Assert.assertEquals("https:
}
|
+ "\"confluent.schema.registry.url\" = \"https:
|
public void testParser() {
String sql = "alter ROUTINE LOAD for testdb.routine_name\n"
+ "WHERE k1 > 1 and k2 like \"%starrocks%\",\n"
+ "COLUMNS(k1, k2, k4 = k1 + k2),\n"
+ "COLUMNS TERMINATED BY \"\\t\",\n"
+ "PARTITION(p1,p2) \n"
+ "PROPERTIES\n"
+ "(\n"
+ "\"max_batch_rows\"=\"200000\",\n"
+ "\"max_error_number\"=\"1\",\n"
+ "\"desired_concurrent_number\"=\"3\",\n"
+ "\"max_batch_interval\" = \"21\",\n"
+ "\"strict_mode\" = \"false\",\n"
+ "\"timezone\" = \"Africa/Abidjan\"\n"
+ ")\n"
+ "FROM KAFKA\n"
+ "(\n"
+ "\"kafka_partitions\" = \"0, 1, 2\",\n"
+ "\"kafka_offsets\" = \"100, 200, 100\",\n"
+ "\"property.group.id\" = \"group1\",\n"
+ "\"confluent.schema.registry.url\" = \"https:
+ ");";
List<StatementBase> stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
AlterRoutineLoadStmt stmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.assertEquals(6, stmt.getAnalyzedJobProperties().size());
Assert.assertTrue(
stmt.getAnalyzedJobProperties().containsKey(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY));
Assert.assertTrue(
stmt.getAnalyzedJobProperties().containsKey(CreateRoutineLoadStmt.MAX_BATCH_ROWS_PROPERTY));
Assert.assertTrue(stmt.hasDataSourceProperty());
Assert.assertEquals(1, stmt.getDataSourceProperties().getCustomKafkaProperties().size());
Assert.assertTrue(stmt.getDataSourceProperties().getCustomKafkaProperties().containsKey("group.id"));
Assert.assertEquals(3, stmt.getDataSourceProperties().getKafkaPartitionOffsets().size());
Assert.assertEquals("https:
}
|
class AlterRoutineLoadStmtTest {
private static ConnectContext connectContext;
@Mocked
private Auth auth;
@Before
public void setUp() throws IOException {
connectContext = UtFrameUtils.createDefaultCtx();
new Expectations() {
{
auth.checkGlobalPriv((ConnectContext) any, (PrivPredicate) any);
minTimes = 0;
result = true;
auth.checkDbPriv((ConnectContext) any, anyString, (PrivPredicate) any);
minTimes = 0;
result = true;
auth.checkTblPriv((ConnectContext) any, anyString, anyString, (PrivPredicate) any);
minTimes = 0;
result = true;
}
};
}
@Test
public void testLoadPropertiesContexts() {
String sql = "ALTER ROUTINE LOAD for testdb.routine_name \n"
+ "PROPERTIES\n"
+ "(\n"
+ "\"max_error_number\"=\"1000\"\n"
+ ")\n";
List<StatementBase> stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
AlterRoutineLoadStmt alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertNotNull(alterRoutineLoadStmt.getRoutineLoadDesc());
Assert.assertEquals(0, alterRoutineLoadStmt.getLoadPropertyList().size());
}
@Test
public void testLoadColumns() {
String sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS(`k1`, `k2`, `k3`, `k4`, `k5`," +
" `v1` = to_bitmap(`k1`))" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")" +
" FROM KAFKA (" +
"\"kafka_partitions\" = \"0, 1, 2\",\n" +
"\"kafka_offsets\" = \"100, 200, 100\",\n" +
"\"property.group.id\" = \"group1\"\n" +
")";
List<StatementBase> stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
AlterRoutineLoadStmt alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(6, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name" +
" COLUMNS(`k1`, `k2`, `k3`, `k4`, `k5`)" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(5, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS( `v1` = to_bitmap(`k1`)," +
" `v2` = to_bitmap(`k2`)," +
" `v3` = to_bitmap(`k3`)," +
" `v4` = to_bitmap(`k4`)," +
" `v5` = to_bitmap(`k5`))" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(5, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS( `v1` = to_bitmap(`k1`)," +
" `v2` = to_bitmap(`k2`)," +
" `v3` = to_bitmap(`k3`)," +
" `v4` = to_bitmap(`k4`)," +
" `v5` = to_bitmap(`k5`)," +
" `k1`, `k2`, `k3`, `k4`, `k5` )";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(10, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS( `v1` = to_bitmap(`k1`), `k1`," +
" `v2` = to_bitmap(`k2`), `k2`," +
" `v3` = to_bitmap(`k3`), `k3`," +
" `v4` = to_bitmap(`k4`), `k4`," +
" `v5` = to_bitmap(`k5`), `k5`)" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(10, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS(`k1`, `k2`, `k3`, `k4`, `k5`," +
" `v1` = to_bitmap(`k1`)," +
" `v2` = to_bitmap(`k2`)," +
" `v3` = to_bitmap(`k3`)," +
" `v4` = to_bitmap(`k4`)," +
" `v5` = to_bitmap(`k5`))" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(10, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
}
@Test
public void testNormal() {
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
jobProperties.put(CreateRoutineLoadStmt.MAX_BATCH_ROWS_PROPERTY, "200000");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put("property.client.id", "101");
dataSourceProperties.put("property.group.id", "mygroup");
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, "1,2,3");
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY, "10000, 20000, 30000");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"),
null, jobProperties, routineLoadDataSourceProperties);
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.assertEquals(2, stmt.getAnalyzedJobProperties().size());
Assert.assertTrue(
stmt.getAnalyzedJobProperties().containsKey(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY));
Assert.assertTrue(
stmt.getAnalyzedJobProperties().containsKey(CreateRoutineLoadStmt.MAX_BATCH_ROWS_PROPERTY));
Assert.assertTrue(stmt.hasDataSourceProperty());
Assert.assertEquals(2, stmt.getDataSourceProperties().getCustomKafkaProperties().size());
Assert.assertTrue(stmt.getDataSourceProperties().getCustomKafkaProperties().containsKey("group.id"));
Assert.assertTrue(stmt.getDataSourceProperties().getCustomKafkaProperties().containsKey("client.id"));
Assert.assertEquals(3, stmt.getDataSourceProperties().getKafkaPartitionOffsets().size());
}
}
@Test
    public void testNoProperties() {
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
Maps.newHashMap(), new RoutineLoadDataSourceProperties());
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
}
@Test
public void testUnsupportedProperties() {
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.FORMAT, "csv");
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, new RoutineLoadDataSourceProperties());
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("format is invalid property"));
}
}
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY, "new_topic");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, routineLoadDataSourceProperties);
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("kafka_topic is invalid kafka custom property"));
}
}
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, "1,2,3");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, routineLoadDataSourceProperties);
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("Partition and offset must be specified at the same time"));
}
}
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, "1,2,3");
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY, "1000, 2000");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, routineLoadDataSourceProperties);
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("Partitions number should be equals to offsets number"));
}
}
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY, "1000, 2000, 3000");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, routineLoadDataSourceProperties);
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("Missing kafka partition info"));
}
}
}
@Test
public void testBackquote() throws SecurityException, IllegalArgumentException {
String sql = "ALTER ROUTINE LOAD FOR `db_test`.`rl_test` PROPERTIES (\"desired_concurrent_number\" = \"10\")" +
"FROM kafka ( \"kafka_partitions\" = \"0, 1, 2\", \"kafka_offsets\" = \"100, 200, 100\"," +
"\"property.group.id\" = \"new_group\" )";
List<StatementBase> stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
AlterRoutineLoadStmt stmt = (AlterRoutineLoadStmt) stmts.get(0);
Assert.assertEquals("db_test", stmt.getDbName());
Assert.assertEquals("rl_test", stmt.getLabelName().getLabelName());
}
}
|
class AlterRoutineLoadStmtTest {
private static ConnectContext connectContext;
@Mocked
private Auth auth;
@Before
public void setUp() throws IOException {
connectContext = UtFrameUtils.createDefaultCtx();
new Expectations() {
{
auth.checkGlobalPriv((ConnectContext) any, (PrivPredicate) any);
minTimes = 0;
result = true;
auth.checkDbPriv((ConnectContext) any, anyString, (PrivPredicate) any);
minTimes = 0;
result = true;
auth.checkTblPriv((ConnectContext) any, anyString, anyString, (PrivPredicate) any);
minTimes = 0;
result = true;
}
};
}
    @Test
public void testLoadPropertiesContexts() {
String sql = "ALTER ROUTINE LOAD for testdb.routine_name \n"
+ "PROPERTIES\n"
+ "(\n"
+ "\"max_error_number\"=\"1000\"\n"
+ ")\n";
List<StatementBase> stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
AlterRoutineLoadStmt alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertNotNull(alterRoutineLoadStmt.getRoutineLoadDesc());
Assert.assertEquals(0, alterRoutineLoadStmt.getLoadPropertyList().size());
}
@Test
public void testLoadColumns() {
String sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS(`k1`, `k2`, `k3`, `k4`, `k5`," +
" `v1` = to_bitmap(`k1`))" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")" +
" FROM KAFKA (" +
"\"kafka_partitions\" = \"0, 1, 2\",\n" +
"\"kafka_offsets\" = \"100, 200, 100\",\n" +
"\"property.group.id\" = \"group1\"\n" +
")";
List<StatementBase> stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
AlterRoutineLoadStmt alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(6, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name" +
" COLUMNS(`k1`, `k2`, `k3`, `k4`, `k5`)" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(5, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS( `v1` = to_bitmap(`k1`)," +
" `v2` = to_bitmap(`k2`)," +
" `v3` = to_bitmap(`k3`)," +
" `v4` = to_bitmap(`k4`)," +
" `v5` = to_bitmap(`k5`))" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(5, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS( `v1` = to_bitmap(`k1`)," +
" `v2` = to_bitmap(`k2`)," +
" `v3` = to_bitmap(`k3`)," +
" `v4` = to_bitmap(`k4`)," +
" `v5` = to_bitmap(`k5`)," +
" `k1`, `k2`, `k3`, `k4`, `k5` )";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(10, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS( `v1` = to_bitmap(`k1`), `k1`," +
" `v2` = to_bitmap(`k2`), `k2`," +
" `v3` = to_bitmap(`k3`), `k3`," +
" `v4` = to_bitmap(`k4`), `k4`," +
" `v5` = to_bitmap(`k5`), `k5`)" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(10, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
sql = "ALTER ROUTINE LOAD for testdb.routine_name " +
" COLUMNS(`k1`, `k2`, `k3`, `k4`, `k5`," +
" `v1` = to_bitmap(`k1`)," +
" `v2` = to_bitmap(`k2`)," +
" `v3` = to_bitmap(`k3`)," +
" `v4` = to_bitmap(`k4`)," +
" `v5` = to_bitmap(`k5`))" +
" PROPERTIES (\"desired_concurrent_number\"=\"1\")";
stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
alterRoutineLoadStmt = (AlterRoutineLoadStmt)stmts.get(0);
AlterRoutineLoadAnalyzer.analyze(alterRoutineLoadStmt, connectContext);
Assert.assertEquals(10, alterRoutineLoadStmt.getRoutineLoadDesc().getColumnsInfo().getColumns().size());
}
@Test
public void testNormal() {
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
jobProperties.put(CreateRoutineLoadStmt.MAX_BATCH_ROWS_PROPERTY, "200000");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put("property.client.id", "101");
dataSourceProperties.put("property.group.id", "mygroup");
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, "1,2,3");
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY, "10000, 20000, 30000");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"),
null, jobProperties, routineLoadDataSourceProperties);
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.assertEquals(2, stmt.getAnalyzedJobProperties().size());
Assert.assertTrue(
stmt.getAnalyzedJobProperties().containsKey(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY));
Assert.assertTrue(
stmt.getAnalyzedJobProperties().containsKey(CreateRoutineLoadStmt.MAX_BATCH_ROWS_PROPERTY));
Assert.assertTrue(stmt.hasDataSourceProperty());
Assert.assertEquals(2, stmt.getDataSourceProperties().getCustomKafkaProperties().size());
Assert.assertTrue(stmt.getDataSourceProperties().getCustomKafkaProperties().containsKey("group.id"));
Assert.assertTrue(stmt.getDataSourceProperties().getCustomKafkaProperties().containsKey("client.id"));
Assert.assertEquals(3, stmt.getDataSourceProperties().getKafkaPartitionOffsets().size());
}
}
@Test
    public void testNoProperties() {
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
Maps.newHashMap(), new RoutineLoadDataSourceProperties());
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
}
@Test
public void testUnsupportedProperties() {
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.FORMAT, "csv");
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, new RoutineLoadDataSourceProperties());
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("format is invalid property"));
}
}
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY, "new_topic");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, routineLoadDataSourceProperties);
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("kafka_topic is invalid kafka custom property"));
}
}
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, "1,2,3");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, routineLoadDataSourceProperties);
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("Partition and offset must be specified at the same time"));
}
}
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, "1,2,3");
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY, "1000, 2000");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, routineLoadDataSourceProperties);
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("Partitions number should be equals to offsets number"));
}
}
{
Map<String, String> jobProperties = Maps.newHashMap();
jobProperties.put(CreateRoutineLoadStmt.MAX_ERROR_NUMBER_PROPERTY, "100");
String typeName = "kafka";
Map<String, String> dataSourceProperties = Maps.newHashMap();
dataSourceProperties.put(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY, "1000, 2000, 3000");
RoutineLoadDataSourceProperties routineLoadDataSourceProperties = new RoutineLoadDataSourceProperties(
typeName, dataSourceProperties);
AlterRoutineLoadStmt stmt = new AlterRoutineLoadStmt(new LabelName("db1", "label1"), null,
jobProperties, routineLoadDataSourceProperties);
try {
AlterRoutineLoadAnalyzer.analyze(stmt, connectContext);
Assert.fail();
} catch (SemanticException e) {
Assert.assertTrue(e.getMessage().contains("Missing kafka partition info"));
}
}
}
@Test
public void testBackquote() throws SecurityException, IllegalArgumentException {
String sql = "ALTER ROUTINE LOAD FOR `db_test`.`rl_test` PROPERTIES (\"desired_concurrent_number\" = \"10\")" +
"FROM kafka ( \"kafka_partitions\" = \"0, 1, 2\", \"kafka_offsets\" = \"100, 200, 100\"," +
"\"property.group.id\" = \"new_group\" )";
List<StatementBase> stmts = com.starrocks.sql.parser.SqlParser.parse(sql, 32);
AlterRoutineLoadStmt stmt = (AlterRoutineLoadStmt) stmts.get(0);
Assert.assertEquals("db_test", stmt.getDbName());
Assert.assertEquals("rl_test", stmt.getLabelName().getLabelName());
}
}
|
nit: maybe we could add some `checkState` that we are indeed in the `timer` thread? But I'm not sure if it's possible or easily doable?
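A minimal sketch of one way to do it, assuming the `timer` is backed by a single-threaded executor whose thread we can capture once. The names `timerThread`, `captureTimerThread`, and `assertRunsInTimerThread` are illustrative, not existing Flink API:

```java
// Illustrative only: capture the timer's single thread once, then assert on it later.
// Assumes the timer is single-threaded, as its field javadoc already states.
private volatile Thread timerThread; // hypothetical field, not part of CheckpointCoordinator

private void captureTimerThread() {
    // Run one task on the timer so the field ends up holding the timer's only thread.
    timer.execute(() -> timerThread = Thread.currentThread());
}

private void assertRunsInTimerThread() {
    // Before the capture task has run, timerThread is still null and the check is skipped.
    Preconditions.checkState(
        timerThread == null || Thread.currentThread() == timerThread,
        "Expected to run in the checkpoint coordinator timer thread.");
}
```

`onTriggerFailure` could then call `assertRunsInTimerThread()` at its entry; whether that is worth the extra plumbing is exactly the open question above.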
|
private void onTriggerFailure(@Nullable PendingCheckpoint checkpoint, Throwable throwable) {
try {
if (checkpoint != null && !checkpoint.isDiscarded()) {
int numUnsuccessful = numUnsuccessfulCheckpointsTriggers.incrementAndGet();
LOG.warn(
"Failed to trigger checkpoint {} for job {}. ({} consecutive failed attempts so far)",
checkpoint.getCheckpointId(),
job,
numUnsuccessful,
throwable);
final CheckpointException cause =
getCheckpointException(
CheckpointFailureReason.TRIGGER_CHECKPOINT_FAILURE, throwable);
synchronized (lock) {
abortPendingCheckpoint(checkpoint, cause);
}
}
} finally {
isTriggering = false;
checkQueuedCheckpointTriggerRequest();
}
}
|
LOG.warn(
|
private void onTriggerFailure(@Nullable PendingCheckpoint checkpoint, Throwable throwable) {
try {
if (checkpoint != null && !checkpoint.isDiscarded()) {
int numUnsuccessful = numUnsuccessfulCheckpointsTriggers.incrementAndGet();
LOG.warn(
"Failed to trigger checkpoint {} for job {}. ({} consecutive failed attempts so far)",
checkpoint.getCheckpointId(),
job,
numUnsuccessful,
throwable);
final CheckpointException cause =
getCheckpointException(
CheckpointFailureReason.TRIGGER_CHECKPOINT_FAILURE, throwable);
synchronized (lock) {
abortPendingCheckpoint(checkpoint, cause);
}
}
} finally {
isTriggering = false;
checkQueuedCheckpointTriggerRequest();
}
}
|
class CheckpointCoordinator {
private static final Logger LOG = LoggerFactory.getLogger(CheckpointCoordinator.class);
/** The number of recent checkpoints whose IDs are remembered. */
private static final int NUM_GHOST_CHECKPOINT_IDS = 16;
/** Coordinator-wide lock to safeguard the checkpoint updates. */
private final Object lock = new Object();
/** The job whose checkpoint this coordinator coordinates. */
private final JobID job;
/** Default checkpoint properties. **/
private final CheckpointProperties checkpointProperties;
/** The executor used for asynchronous calls, like potentially blocking I/O. */
private final Executor executor;
/** Tasks who need to be sent a message when a checkpoint is started. */
private final ExecutionVertex[] tasksToTrigger;
/** Tasks who need to acknowledge a checkpoint before it succeeds. */
private final ExecutionVertex[] tasksToWaitFor;
/** Tasks who need to be sent a message when a checkpoint is confirmed. */
private final ExecutionVertex[] tasksToCommitTo;
/** Map from checkpoint ID to the pending checkpoint. */
private final Map<Long, PendingCheckpoint> pendingCheckpoints;
/** Completed checkpoints. Implementations can be blocking. Make sure calls to methods
* accessing this don't block the job manager actor and run asynchronously. */
private final CompletedCheckpointStore completedCheckpointStore;
/** The root checkpoint state backend, which is responsible for initializing the
* checkpoint, storing the metadata, and cleaning up the checkpoint. */
private final CheckpointStorageCoordinatorView checkpointStorage;
/** A list of recent checkpoint IDs, to identify late messages (vs invalid ones). */
private final ArrayDeque<Long> recentPendingCheckpoints;
/** Checkpoint ID counter to ensure ascending IDs. In case of job manager failures, these
* need to be ascending across job managers. */
private final CheckpointIDCounter checkpointIdCounter;
/** The base checkpoint interval. Actual trigger time may be affected by the
* max concurrent checkpoints and minimum-pause values */
private final long baseInterval;
/** The max time (in ms) that a checkpoint may take. */
private final long checkpointTimeout;
    /** The min time (in ms) to delay after a checkpoint could be triggered. Allows to
* enforce minimum processing time between checkpoint attempts */
private final long minPauseBetweenCheckpoints;
/** The maximum number of checkpoints that may be in progress at the same time. */
private final int maxConcurrentCheckpointAttempts;
/** The timer that handles the checkpoint timeouts and triggers periodic checkpoints.
* It must be single-threaded. Eventually it will be replaced by main thread executor. */
private final ScheduledExecutor timer;
/** The master checkpoint hooks executed by this checkpoint coordinator. */
private final HashMap<String, MasterTriggerRestoreHook<?>> masterHooks;
/** Actor that receives status updates from the execution graph this coordinator works for. */
private JobStatusListener jobStatusListener;
/** The number of consecutive failed trigger attempts. */
private final AtomicInteger numUnsuccessfulCheckpointsTriggers = new AtomicInteger(0);
/** A handle to the current periodic trigger, to cancel it when necessary. */
private ScheduledFuture<?> currentPeriodicTrigger;
    /** The timestamp (via {@link Clock#relativeTimeMillis()}) when the last checkpoint was
     * completed. */
private long lastCheckpointCompletionRelativeTime;
/** Flag whether a triggered checkpoint should immediately schedule the next checkpoint.
* Non-volatile, because only accessed in synchronized scope */
private boolean periodicScheduling;
/** Flag whether periodic triggering is suspended (too many concurrent pending checkpoint).
* Non-volatile, because only accessed in synchronized scope */
private boolean periodicTriggeringSuspended;
/** Flag marking the coordinator as shut down (not accepting any messages any more). */
private volatile boolean shutdown;
/** Optional tracker for checkpoint statistics. */
@Nullable
private CheckpointStatsTracker statsTracker;
/** A factory for SharedStateRegistry objects. */
private final SharedStateRegistryFactory sharedStateRegistryFactory;
/** Registry that tracks state which is shared across (incremental) checkpoints. */
private SharedStateRegistry sharedStateRegistry;
private boolean isPreferCheckpointForRecovery;
private final CheckpointFailureManager failureManager;
private final Clock clock;
/** Flag represents there is an in-flight trigger request. */
private boolean isTriggering = false;
/** A queue to cache those trigger requests which can't be trigger immediately. */
private final ArrayDeque<CheckpointTriggerRequest> triggerRequestQueue;
public CheckpointCoordinator(
JobID job,
CheckpointCoordinatorConfiguration chkConfig,
ExecutionVertex[] tasksToTrigger,
ExecutionVertex[] tasksToWaitFor,
ExecutionVertex[] tasksToCommitTo,
CheckpointIDCounter checkpointIDCounter,
CompletedCheckpointStore completedCheckpointStore,
StateBackend checkpointStateBackend,
Executor executor,
ScheduledExecutor timer,
SharedStateRegistryFactory sharedStateRegistryFactory,
CheckpointFailureManager failureManager) {
this(
job,
chkConfig,
tasksToTrigger,
tasksToWaitFor,
tasksToCommitTo,
checkpointIDCounter,
completedCheckpointStore,
checkpointStateBackend,
executor,
timer,
sharedStateRegistryFactory,
failureManager,
SystemClock.getInstance());
}
@VisibleForTesting
public CheckpointCoordinator(
JobID job,
CheckpointCoordinatorConfiguration chkConfig,
ExecutionVertex[] tasksToTrigger,
ExecutionVertex[] tasksToWaitFor,
ExecutionVertex[] tasksToCommitTo,
CheckpointIDCounter checkpointIDCounter,
CompletedCheckpointStore completedCheckpointStore,
StateBackend checkpointStateBackend,
Executor executor,
ScheduledExecutor timer,
SharedStateRegistryFactory sharedStateRegistryFactory,
CheckpointFailureManager failureManager,
Clock clock) {
checkNotNull(checkpointStateBackend);
long minPauseBetweenCheckpoints = chkConfig.getMinPauseBetweenCheckpoints();
if (minPauseBetweenCheckpoints > 365L * 24 * 60 * 60 * 1_000) {
minPauseBetweenCheckpoints = 365L * 24 * 60 * 60 * 1_000;
}
long baseInterval = chkConfig.getCheckpointInterval();
if (baseInterval < minPauseBetweenCheckpoints) {
baseInterval = minPauseBetweenCheckpoints;
}
this.job = checkNotNull(job);
this.baseInterval = baseInterval;
this.checkpointTimeout = chkConfig.getCheckpointTimeout();
this.minPauseBetweenCheckpoints = minPauseBetweenCheckpoints;
this.maxConcurrentCheckpointAttempts = chkConfig.getMaxConcurrentCheckpoints();
this.tasksToTrigger = checkNotNull(tasksToTrigger);
this.tasksToWaitFor = checkNotNull(tasksToWaitFor);
this.tasksToCommitTo = checkNotNull(tasksToCommitTo);
this.pendingCheckpoints = new LinkedHashMap<>();
this.checkpointIdCounter = checkNotNull(checkpointIDCounter);
this.completedCheckpointStore = checkNotNull(completedCheckpointStore);
this.executor = checkNotNull(executor);
this.sharedStateRegistryFactory = checkNotNull(sharedStateRegistryFactory);
this.sharedStateRegistry = sharedStateRegistryFactory.create(executor);
this.isPreferCheckpointForRecovery = chkConfig.isPreferCheckpointForRecovery();
this.failureManager = checkNotNull(failureManager);
this.clock = checkNotNull(clock);
this.recentPendingCheckpoints = new ArrayDeque<>(NUM_GHOST_CHECKPOINT_IDS);
this.masterHooks = new HashMap<>();
this.triggerRequestQueue = new ArrayDeque<>();
this.timer = timer;
this.checkpointProperties = CheckpointProperties.forCheckpoint(chkConfig.getCheckpointRetentionPolicy());
try {
this.checkpointStorage = checkpointStateBackend.createCheckpointStorage(job);
checkpointStorage.initializeBaseLocations();
} catch (IOException e) {
throw new FlinkRuntimeException("Failed to create checkpoint storage at checkpoint coordinator side.", e);
}
try {
checkpointIDCounter.start();
} catch (Throwable t) {
throw new RuntimeException("Failed to start checkpoint ID counter: " + t.getMessage(), t);
}
}
/**
     * Adds the given master hook to the checkpoint coordinator. This method does nothing if
     * the checkpoint coordinator already contains a hook with the same ID (as defined via
     * {@link MasterTriggerRestoreHook#getIdentifier()}).
*
* @param hook The hook to add.
* @return True, if the hook was added, false if the checkpoint coordinator already
* contained a hook with the same ID.
*/
public boolean addMasterHook(MasterTriggerRestoreHook<?> hook) {
checkNotNull(hook);
final String id = hook.getIdentifier();
checkArgument(!StringUtils.isNullOrWhitespaceOnly(id), "The hook has a null or empty id");
synchronized (lock) {
if (!masterHooks.containsKey(id)) {
masterHooks.put(id, hook);
return true;
}
else {
return false;
}
}
}
/**
     * Gets the number of currently registered master hooks.
*/
public int getNumberOfRegisteredMasterHooks() {
synchronized (lock) {
return masterHooks.size();
}
}
/**
* Sets the checkpoint stats tracker.
*
* @param statsTracker The checkpoint stats tracker.
*/
public void setCheckpointStatsTracker(@Nullable CheckpointStatsTracker statsTracker) {
this.statsTracker = statsTracker;
}
/**
* Shuts down the checkpoint coordinator.
*
* <p>After this method has been called, the coordinator does not accept
     * any further messages and cannot trigger any further checkpoints.
*/
public void shutdown(JobStatus jobStatus) throws Exception {
synchronized (lock) {
if (!shutdown) {
shutdown = true;
LOG.info("Stopping checkpoint coordinator for job {}.", job);
periodicScheduling = false;
periodicTriggeringSuspended = false;
MasterHooks.close(masterHooks.values(), LOG);
masterHooks.clear();
final CheckpointException reason = new CheckpointException(
CheckpointFailureReason.CHECKPOINT_COORDINATOR_SHUTDOWN);
abortPendingAndQueuedCheckpoints(reason);
completedCheckpointStore.shutdown(jobStatus);
checkpointIdCounter.shutdown(jobStatus);
}
}
}
public boolean isShutdown() {
return shutdown;
}
/**
* Triggers a savepoint with the given savepoint directory as a target.
*
* @param timestamp The timestamp for the savepoint.
* @param targetLocation Target location for the savepoint, optional. If null, the
* state backend's configured default will be used.
* @return A future to the completed checkpoint
* @throws IllegalStateException If no savepoint directory has been
* specified and no default savepoint directory has been
* configured
*/
public CompletableFuture<CompletedCheckpoint> triggerSavepoint(
final long timestamp,
@Nullable final String targetLocation) {
final CheckpointProperties properties = CheckpointProperties.forSavepoint();
return triggerSavepointInternal(timestamp, properties, false, targetLocation);
}
/**
* Triggers a synchronous savepoint with the given savepoint directory as a target.
*
* @param timestamp The timestamp for the savepoint.
* @param advanceToEndOfEventTime Flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
* to fire any registered event-time timers.
* @param targetLocation Target location for the savepoint, optional. If null, the
* state backend's configured default will be used.
* @return A future to the completed checkpoint
* @throws IllegalStateException If no savepoint directory has been
* specified and no default savepoint directory has been
* configured
*/
public CompletableFuture<CompletedCheckpoint> triggerSynchronousSavepoint(
final long timestamp,
final boolean advanceToEndOfEventTime,
@Nullable final String targetLocation) {
final CheckpointProperties properties = CheckpointProperties.forSyncSavepoint();
return triggerSavepointInternal(timestamp, properties, advanceToEndOfEventTime, targetLocation);
}
private CompletableFuture<CompletedCheckpoint> triggerSavepointInternal(
final long timestamp,
final CheckpointProperties checkpointProperties,
final boolean advanceToEndOfEventTime,
@Nullable final String targetLocation) {
checkNotNull(checkpointProperties);
final CompletableFuture<CompletedCheckpoint> resultFuture = new CompletableFuture<>();
timer.execute(() -> triggerCheckpoint(
timestamp,
checkpointProperties,
targetLocation,
false,
advanceToEndOfEventTime)
.whenComplete((completedCheckpoint, throwable) -> {
if (throwable == null) {
resultFuture.complete(completedCheckpoint);
} else {
resultFuture.completeExceptionally(throwable);
}
}));
return resultFuture;
}
/**
* Triggers a new standard checkpoint and uses the given timestamp as the checkpoint
     * timestamp. The return value is a future. It completes when the triggered checkpoint finishes
     * or an error occurs.
*
* @param timestamp The timestamp for the checkpoint.
* @param isPeriodic Flag indicating whether this triggered checkpoint is
* periodic. If this flag is true, but the periodic scheduler is disabled,
* the checkpoint will be declined.
* @return a future to the completed checkpoint.
*/
public CompletableFuture<CompletedCheckpoint> triggerCheckpoint(long timestamp, boolean isPeriodic) {
return triggerCheckpoint(timestamp, checkpointProperties, null, isPeriodic, false);
}
@VisibleForTesting
public CompletableFuture<CompletedCheckpoint> triggerCheckpoint(
long timestamp,
CheckpointProperties props,
@Nullable String externalSavepointLocation,
boolean isPeriodic,
boolean advanceToEndOfTime) {
if (advanceToEndOfTime && !(props.isSynchronous() && props.isSavepoint())) {
return FutureUtils.completedExceptionally(new IllegalArgumentException(
"Only synchronous savepoints are allowed to advance the watermark to MAX."));
}
final CompletableFuture<CompletedCheckpoint> onCompletionPromise =
new CompletableFuture<>();
synchronized (lock) {
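            // A trigger is already in flight or earlier requests are queued: enqueue this
            // request so that trigger order is preserved.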
if (isTriggering || !triggerRequestQueue.isEmpty()) {
triggerRequestQueue.add(new CheckpointTriggerRequest(
timestamp,
props,
externalSavepointLocation,
isPeriodic,
advanceToEndOfTime,
onCompletionPromise));
return onCompletionPromise;
}
}
startTriggeringCheckpoint(
timestamp,
props,
externalSavepointLocation,
isPeriodic,
advanceToEndOfTime,
onCompletionPromise);
return onCompletionPromise;
}
private void startTriggeringCheckpoint(
long timestamp,
CheckpointProperties props,
@Nullable String externalSavepointLocation,
boolean isPeriodic,
boolean advanceToEndOfTime,
CompletableFuture<CompletedCheckpoint> onCompletionPromise) {
try {
synchronized (lock) {
preCheckBeforeTriggeringCheckpoint(isPeriodic, props.forceCheckpoint());
}
final Execution[] executions = getTriggerExecutions();
final Map<ExecutionAttemptID, ExecutionVertex> ackTasks = getAckTasks();
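            // Only one trigger pipeline may be in flight at a time; the flag is reset in
            // onTriggerSuccess() / onTriggerFailure().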
Preconditions.checkState(!isTriggering);
isTriggering = true;
final CompletableFuture<PendingCheckpoint> pendingCheckpointCompletableFuture =
initializeCheckpoint(props, externalSavepointLocation)
.thenApplyAsync((checkpointIdAndStorageLocation) -> createPendingCheckpoint(
timestamp,
props,
ackTasks,
isPeriodic,
checkpointIdAndStorageLocation.checkpointId,
checkpointIdAndStorageLocation.checkpointStorageLocation,
onCompletionPromise), timer);
pendingCheckpointCompletableFuture
.thenCompose(this::snapshotMasterState)
.whenComplete((ignored, throwable) -> {
final PendingCheckpoint checkpoint =
FutureUtils.getWithoutException(pendingCheckpointCompletableFuture);
if (throwable == null && checkpoint != null && !checkpoint.isDiscarded()) {
snapshotTaskState(
timestamp,
checkpoint.getCheckpointId(),
checkpoint.getCheckpointStorageLocation(),
props,
executions,
advanceToEndOfTime);
onTriggerSuccess();
} else {
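                        // Re-dispatch failure handling to the timer thread, where the rest of
                        // the trigger pipeline runs.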
timer.execute(() -> {
if (checkpoint == null) {
onTriggerFailure(onCompletionPromise, throwable);
} else {
onTriggerFailure(checkpoint, throwable);
}
});
}
});
} catch (Throwable throwable) {
onTriggerFailure(onCompletionPromise, throwable);
}
}
/**
     * Initialize the checkpoint trigger asynchronously. It will be executed in the I/O thread
     * because it might be time-consuming.
*
* @param props checkpoint properties
* @param externalSavepointLocation the external savepoint location, it might be null
     * @return a future holding the initialization result: the checkpoint id and the checkpoint storage location
*/
private CompletableFuture<CheckpointIdAndStorageLocation> initializeCheckpoint(
CheckpointProperties props,
@Nullable String externalSavepointLocation) {
return CompletableFuture.supplyAsync(() -> {
try {
long checkpointID = checkpointIdCounter.getAndIncrement();
CheckpointStorageLocation checkpointStorageLocation = props.isSavepoint() ?
checkpointStorage
.initializeLocationForSavepoint(checkpointID, externalSavepointLocation) :
checkpointStorage.initializeLocationForCheckpoint(checkpointID);
return new CheckpointIdAndStorageLocation(checkpointID, checkpointStorageLocation);
} catch (Throwable throwable) {
throw new CompletionException(throwable);
}
}, executor);
}
private PendingCheckpoint createPendingCheckpoint(
long timestamp,
CheckpointProperties props,
Map<ExecutionAttemptID, ExecutionVertex> ackTasks,
boolean isPeriodic,
long checkpointID,
CheckpointStorageLocation checkpointStorageLocation,
CompletableFuture<CompletedCheckpoint> onCompletionPromise) {
synchronized (lock) {
try {
preCheckGlobalState(isPeriodic);
} catch (Throwable t) {
throw new CompletionException(t);
}
}
final PendingCheckpoint checkpoint = new PendingCheckpoint(
job,
checkpointID,
timestamp,
ackTasks,
masterHooks.keySet(),
props,
checkpointStorageLocation,
executor,
onCompletionPromise);
if (statsTracker != null) {
PendingCheckpointStats callback = statsTracker.reportPendingCheckpoint(
checkpointID,
timestamp,
props);
checkpoint.setStatsCallback(callback);
}
synchronized (lock) {
pendingCheckpoints.put(checkpointID, checkpoint);
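            // Schedule the canceller that aborts this checkpoint if it does not complete
            // within the configured timeout.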
ScheduledFuture<?> cancellerHandle = timer.schedule(
new CheckpointCanceller(checkpoint),
checkpointTimeout, TimeUnit.MILLISECONDS);
if (!checkpoint.setCancellerHandle(cancellerHandle)) {
cancellerHandle.cancel(false);
}
}
LOG.info("Triggering checkpoint {} @ {} for job {}.", checkpointID, timestamp, job);
return checkpoint;
}
/**
* Snapshot master hook states asynchronously.
*
* @param checkpoint the pending checkpoint
     * @return a future that completes once all master hook states have been snapshotted
*/
private CompletableFuture<Void> snapshotMasterState(PendingCheckpoint checkpoint) {
if (masterHooks.isEmpty()) {
return CompletableFuture.completedFuture(null);
}
final long checkpointID = checkpoint.getCheckpointId();
final long timestamp = checkpoint.getCheckpointTimestamp();
final CompletableFuture<Void> masterStateCompletableFuture = new CompletableFuture<>();
for (MasterTriggerRestoreHook<?> masterHook : masterHooks.values()) {
MasterHooks
.triggerHook(masterHook, checkpointID, timestamp, executor)
.whenCompleteAsync((masterState, throwable) -> {
try {
synchronized (lock) {
if (masterStateCompletableFuture.isDone()) {
return;
}
if (checkpoint.isDiscarded()) {
throw new IllegalStateException(
"Checkpoint " + checkpointID + " has been discarded");
}
if (throwable == null) {
checkpoint.acknowledgeMasterState(
masterHook.getIdentifier(), masterState);
if (checkpoint.areMasterStatesFullyAcknowledged()) {
masterStateCompletableFuture.complete(null);
}
} else {
masterStateCompletableFuture.completeExceptionally(throwable);
}
}
} catch (Throwable t) {
masterStateCompletableFuture.completeExceptionally(t);
}
}, timer);
}
return masterStateCompletableFuture;
}
/**
* Snapshot task state.
*
     * @param timestamp the timestamp of this checkpoint request
* @param checkpointID the checkpoint id
* @param checkpointStorageLocation the checkpoint location
* @param props the checkpoint properties
* @param executions the executions which should be triggered
* @param advanceToEndOfTime Flag indicating if the source should inject a {@code MAX_WATERMARK}
* in the pipeline to fire any registered event-time timers.
*/
private void snapshotTaskState(
long timestamp,
long checkpointID,
CheckpointStorageLocation checkpointStorageLocation,
CheckpointProperties props,
Execution[] executions,
boolean advanceToEndOfTime) {
final CheckpointOptions checkpointOptions = new CheckpointOptions(
props.getCheckpointType(),
checkpointStorageLocation.getLocationReference());
for (Execution execution: executions) {
if (props.isSynchronous()) {
execution.triggerSynchronousSavepoint(checkpointID, timestamp, checkpointOptions, advanceToEndOfTime);
} else {
execution.triggerCheckpoint(checkpointID, timestamp, checkpointOptions);
}
}
}
/**
     * The trigger request was successful.
     * NOTE: this must be invoked whenever a trigger request succeeds.
*/
private void onTriggerSuccess() {
isTriggering = false;
numUnsuccessfulCheckpointsTriggers.set(0);
checkQueuedCheckpointTriggerRequest();
}
/**
     * The trigger request failed prematurely, without proper initialization.
     * There are no resources to release, but the completion promise needs to be failed manually here.
*
* @param onCompletionPromise the completion promise of the checkpoint/savepoint
* @param throwable the reason of trigger failure
*/
private void onTriggerFailure(
CompletableFuture<CompletedCheckpoint> onCompletionPromise, Throwable throwable) {
final CheckpointException checkpointException =
getCheckpointException(CheckpointFailureReason.TRIGGER_CHECKPOINT_FAILURE, throwable);
onCompletionPromise.completeExceptionally(checkpointException);
onTriggerFailure((PendingCheckpoint) null, checkpointException);
}
    /**
     * The trigger request failed.
     * NOTE: this must be invoked whenever a trigger request fails.
     *
     * @param checkpoint the pending checkpoint which failed. It could be null if the request
     * failed prematurely, without proper initialization.
     * @param throwable the reason of trigger failure
     */
    private void onTriggerFailure(@Nullable PendingCheckpoint checkpoint, Throwable throwable) {
        try {
            if (checkpoint != null && !checkpoint.isDiscarded()) {
                int numUnsuccessful = numUnsuccessfulCheckpointsTriggers.incrementAndGet();
                LOG.warn(
                    "Failed to trigger checkpoint {} for job {}. ({} consecutive failed attempts so far)",
                    checkpoint.getCheckpointId(),
                    job,
                    numUnsuccessful,
                    throwable);
                final CheckpointException cause =
                    getCheckpointException(
                        CheckpointFailureReason.TRIGGER_CHECKPOINT_FAILURE, throwable);
                synchronized (lock) {
                    abortPendingCheckpoint(checkpoint, cause);
                }
            }
        } finally {
            isTriggering = false;
            checkQueuedCheckpointTriggerRequest();
        }
    }
/**
* Checks whether there is a trigger request queued. Consumes it if there is one.
* NOTE: this must be called after each triggering
*/
private void checkQueuedCheckpointTriggerRequest() {
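        // Check and poll under the lock, but start the actual triggering outside of it.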
synchronized (lock) {
if (triggerRequestQueue.isEmpty()) {
return;
}
}
final CheckpointTriggerRequest request;
synchronized (lock) {
request = triggerRequestQueue.poll();
}
if (request != null) {
startTriggeringCheckpoint(
request.timestamp,
request.props,
request.externalSavepointLocation,
request.isPeriodic,
request.advanceToEndOfTime,
request.onCompletionPromise);
}
}
/**
* Receives a {@link DeclineCheckpoint} message for a pending checkpoint.
*
* @param message Checkpoint decline from the task manager
* @param taskManagerLocationInfo The location info of the decline checkpoint message's sender
*/
public void receiveDeclineMessage(DeclineCheckpoint message, String taskManagerLocationInfo) {
if (shutdown || message == null) {
return;
}
if (!job.equals(message.getJob())) {
throw new IllegalArgumentException("Received DeclineCheckpoint message for job " +
message.getJob() + " from " + taskManagerLocationInfo + " while this coordinator handles job " + job);
}
final long checkpointId = message.getCheckpointId();
final String reason = (message.getReason() != null ? message.getReason().getMessage() : "");
PendingCheckpoint checkpoint;
synchronized (lock) {
if (shutdown) {
return;
}
checkpoint = pendingCheckpoints.get(checkpointId);
if (checkpoint != null) {
Preconditions.checkState(
!checkpoint.isDiscarded(),
"Received message for discarded but non-removed checkpoint " + checkpointId);
LOG.info("Decline checkpoint {} by task {} of job {} at {}.",
checkpointId,
message.getTaskExecutionId(),
job,
taskManagerLocationInfo);
final CheckpointException checkpointException;
if (message.getReason() == null) {
checkpointException =
new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED);
} else {
checkpointException = getCheckpointException(
CheckpointFailureReason.JOB_FAILURE, message.getReason());
}
abortPendingCheckpoint(
checkpoint,
checkpointException,
message.getTaskExecutionId());
} else if (LOG.isDebugEnabled()) {
if (recentPendingCheckpoints.contains(checkpointId)) {
LOG.debug("Received another decline message for now expired checkpoint attempt {} from task {} of job {} at {} : {}",
checkpointId, message.getTaskExecutionId(), job, taskManagerLocationInfo, reason);
} else {
LOG.debug("Received decline message for unknown (too old?) checkpoint attempt {} from task {} of job {} at {} : {}",
checkpointId, message.getTaskExecutionId(), job, taskManagerLocationInfo, reason);
}
}
}
}
/**
* Receives an AcknowledgeCheckpoint message and returns whether the
* message was associated with a pending checkpoint.
*
* @param message Checkpoint ack from the task manager
*
* @param taskManagerLocationInfo The location of the acknowledge checkpoint message's sender
* @return Flag indicating whether the ack'd checkpoint was associated
* with a pending checkpoint.
*
* @throws CheckpointException If the checkpoint cannot be added to the completed checkpoint store.
*/
public boolean receiveAcknowledgeMessage(AcknowledgeCheckpoint message, String taskManagerLocationInfo) throws CheckpointException {
if (shutdown || message == null) {
return false;
}
if (!job.equals(message.getJob())) {
LOG.error("Received wrong AcknowledgeCheckpoint message for job {} from {} : {}", job, taskManagerLocationInfo, message);
return false;
}
final long checkpointId = message.getCheckpointId();
synchronized (lock) {
if (shutdown) {
return false;
}
final PendingCheckpoint checkpoint = pendingCheckpoints.get(checkpointId);
if (checkpoint != null && !checkpoint.isDiscarded()) {
switch (checkpoint.acknowledgeTask(message.getTaskExecutionId(), message.getSubtaskState(), message.getCheckpointMetrics())) {
case SUCCESS:
LOG.debug("Received acknowledge message for checkpoint {} from task {} of job {} at {}.",
checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
if (checkpoint.areTasksFullyAcknowledged()) {
completePendingCheckpoint(checkpoint);
}
break;
case DUPLICATE:
LOG.debug("Received a duplicate acknowledge message for checkpoint {}, task {}, job {}, location {}.",
message.getCheckpointId(), message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
break;
case UNKNOWN:
LOG.warn("Could not acknowledge the checkpoint {} for task {} of job {} at {}, " +
"because the task's execution attempt id was unknown. Discarding " +
"the state handle to avoid lingering state.", message.getCheckpointId(),
message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
discardSubtaskState(message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState());
break;
case DISCARDED:
LOG.warn("Could not acknowledge the checkpoint {} for task {} of job {} at {}, " +
"because the pending checkpoint had been discarded. Discarding the " +
"state handle tp avoid lingering state.",
message.getCheckpointId(), message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
discardSubtaskState(message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState());
}
return true;
}
else if (checkpoint != null) {
throw new IllegalStateException(
"Received message for discarded but non-removed checkpoint " + checkpointId);
}
else {
boolean wasPendingCheckpoint;
if (recentPendingCheckpoints.contains(checkpointId)) {
wasPendingCheckpoint = true;
LOG.warn("Received late message for now expired checkpoint attempt {} from task " +
"{} of job {} at {}.", checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
}
else {
LOG.debug("Received message for an unknown checkpoint {} from task {} of job {} at {}.",
checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
wasPendingCheckpoint = false;
}
discardSubtaskState(message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState());
return wasPendingCheckpoint;
}
}
}
/**
* Try to complete the given pending checkpoint.
*
* <p>Important: This method should only be called in the checkpoint lock scope.
*
* @param pendingCheckpoint to complete
* @throws CheckpointException if the completion failed
*/
private void completePendingCheckpoint(PendingCheckpoint pendingCheckpoint) throws CheckpointException {
final long checkpointId = pendingCheckpoint.getCheckpointId();
final CompletedCheckpoint completedCheckpoint;
Map<OperatorID, OperatorState> operatorStates = pendingCheckpoint.getOperatorStates();
sharedStateRegistry.registerAll(operatorStates.values());
try {
try {
completedCheckpoint = pendingCheckpoint.finalizeCheckpoint();
failureManager.handleCheckpointSuccess(pendingCheckpoint.getCheckpointId());
}
catch (Exception e1) {
if (!pendingCheckpoint.isDiscarded()) {
abortPendingCheckpoint(
pendingCheckpoint,
new CheckpointException(
CheckpointFailureReason.FINALIZE_CHECKPOINT_FAILURE, e1));
}
throw new CheckpointException("Could not finalize the pending checkpoint " + checkpointId + '.',
CheckpointFailureReason.FINALIZE_CHECKPOINT_FAILURE, e1);
}
Preconditions.checkState(pendingCheckpoint.isDiscarded() && completedCheckpoint != null);
try {
completedCheckpointStore.addCheckpoint(completedCheckpoint);
} catch (Exception exception) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
completedCheckpoint.discardOnFailedStoring();
} catch (Throwable t) {
LOG.warn("Could not properly discard completed checkpoint {}.", completedCheckpoint.getCheckpointID(), t);
}
}
});
throw new CheckpointException("Could not complete the pending checkpoint " + checkpointId + '.',
CheckpointFailureReason.FINALIZE_CHECKPOINT_FAILURE, exception);
}
} finally {
pendingCheckpoints.remove(checkpointId);
resumePeriodicTriggering();
}
rememberRecentCheckpointId(checkpointId);
dropSubsumedCheckpoints(checkpointId);
lastCheckpointCompletionRelativeTime = clock.relativeTimeMillis();
LOG.info("Completed checkpoint {} for job {} ({} bytes in {} ms).", checkpointId, job,
completedCheckpoint.getStateSize(), completedCheckpoint.getDuration());
if (LOG.isDebugEnabled()) {
StringBuilder builder = new StringBuilder();
builder.append("Checkpoint state: ");
for (OperatorState state : completedCheckpoint.getOperatorStates().values()) {
builder.append(state);
builder.append(", ");
}
builder.setLength(builder.length() - 2);
LOG.debug(builder.toString());
}
final long timestamp = completedCheckpoint.getTimestamp();
for (ExecutionVertex ev : tasksToCommitTo) {
Execution ee = ev.getCurrentExecutionAttempt();
if (ee != null) {
ee.notifyCheckpointComplete(checkpointId, timestamp);
}
}
}
/**
* Fails all pending checkpoints which have not been acknowledged by the given execution
* attempt id.
*
* @param executionAttemptId for which to discard unacknowledged pending checkpoints
* @param cause of the failure
*/
public void failUnacknowledgedPendingCheckpointsFor(ExecutionAttemptID executionAttemptId, Throwable cause) {
synchronized (lock) {
abortPendingCheckpoints(
checkpoint -> !checkpoint.isAcknowledgedBy(executionAttemptId),
new CheckpointException(CheckpointFailureReason.TASK_FAILURE, cause));
}
}
private void rememberRecentCheckpointId(long id) {
if (recentPendingCheckpoints.size() >= NUM_GHOST_CHECKPOINT_IDS) {
recentPendingCheckpoints.removeFirst();
}
recentPendingCheckpoints.addLast(id);
}
private void dropSubsumedCheckpoints(long checkpointId) {
abortPendingCheckpoints(
checkpoint -> checkpoint.getCheckpointId() < checkpointId && checkpoint.canBeSubsumed(),
new CheckpointException(CheckpointFailureReason.CHECKPOINT_SUBSUMED));
}
/**
* Resumes suspended periodic triggering.
*
* <p>NOTE: The caller of this method must hold the lock when invoking the method!
*/
private void resumePeriodicTriggering() {
assert(Thread.holdsLock(lock));
if (shutdown || !periodicScheduling) {
return;
}
if (periodicTriggeringSuspended) {
periodicTriggeringSuspended = false;
if (currentPeriodicTrigger != null) {
currentPeriodicTrigger.cancel(false);
}
currentPeriodicTrigger = scheduleTriggerWithDelay(0L);
}
}
/**
* Restores the latest checkpointed state.
*
* @param tasks Map of job vertices to restore. State for these vertices is
     * restored via {@link Execution#setInitialState(JobManagerTaskRestore)}.
* @param errorIfNoCheckpoint Fail if no completed checkpoint is available to
* restore from.
* @param allowNonRestoredState Allow checkpoint state that cannot be mapped
* to any job vertex in tasks.
* @return <code>true</code> if state was restored, <code>false</code> otherwise.
* @throws IllegalStateException If the CheckpointCoordinator is shut down.
* @throws IllegalStateException If no completed checkpoint is available and
* the <code>failIfNoCheckpoint</code> flag has been set.
* @throws IllegalStateException If the checkpoint contains state that cannot be
* mapped to any job vertex in <code>tasks</code> and the
* <code>allowNonRestoredState</code> flag has not been set.
* @throws IllegalStateException If the max parallelism changed for an operator
* that restores state from this checkpoint.
* @throws IllegalStateException If the parallelism changed for an operator
* that restores <i>non-partitioned</i> state from this
* checkpoint.
*/
@Deprecated
public boolean restoreLatestCheckpointedState(
Map<JobVertexID, ExecutionJobVertex> tasks,
boolean errorIfNoCheckpoint,
boolean allowNonRestoredState) throws Exception {
return restoreLatestCheckpointedState(new HashSet<>(tasks.values()), errorIfNoCheckpoint, allowNonRestoredState);
}
/**
* Restores the latest checkpointed state.
*
* @param tasks Set of job vertices to restore. State for these vertices is
     * restored via {@link Execution#setInitialState(JobManagerTaskRestore)}.
* @param errorIfNoCheckpoint Fail if no completed checkpoint is available to
* restore from.
* @param allowNonRestoredState Allow checkpoint state that cannot be mapped
* to any job vertex in tasks.
* @return <code>true</code> if state was restored, <code>false</code> otherwise.
* @throws IllegalStateException If the CheckpointCoordinator is shut down.
* @throws IllegalStateException If no completed checkpoint is available and
* the <code>failIfNoCheckpoint</code> flag has been set.
* @throws IllegalStateException If the checkpoint contains state that cannot be
* mapped to any job vertex in <code>tasks</code> and the
* <code>allowNonRestoredState</code> flag has not been set.
* @throws IllegalStateException If the max parallelism changed for an operator
* that restores state from this checkpoint.
* @throws IllegalStateException If the parallelism changed for an operator
* that restores <i>non-partitioned</i> state from this
* checkpoint.
*/
public boolean restoreLatestCheckpointedState(
final Set<ExecutionJobVertex> tasks,
final boolean errorIfNoCheckpoint,
final boolean allowNonRestoredState) throws Exception {
synchronized (lock) {
if (shutdown) {
throw new IllegalStateException("CheckpointCoordinator is shut down");
}
sharedStateRegistry.close();
sharedStateRegistry = sharedStateRegistryFactory.create(executor);
completedCheckpointStore.recover();
for (CompletedCheckpoint completedCheckpoint : completedCheckpointStore.getAllCheckpoints()) {
completedCheckpoint.registerSharedStatesAfterRestored(sharedStateRegistry);
}
LOG.debug("Status of the shared state registry of job {} after restore: {}.", job, sharedStateRegistry);
CompletedCheckpoint latest = completedCheckpointStore.getLatestCheckpoint(isPreferCheckpointForRecovery);
if (latest == null) {
if (errorIfNoCheckpoint) {
throw new IllegalStateException("No completed checkpoint available");
} else {
LOG.debug("Resetting the master hooks.");
MasterHooks.reset(masterHooks.values(), LOG);
return false;
}
}
LOG.info("Restoring job {} from latest valid checkpoint: {}.", job, latest);
final Map<OperatorID, OperatorState> operatorStates = latest.getOperatorStates();
StateAssignmentOperation stateAssignmentOperation =
new StateAssignmentOperation(latest.getCheckpointID(), tasks, operatorStates, allowNonRestoredState);
stateAssignmentOperation.assignStates();
MasterHooks.restoreMasterHooks(
masterHooks,
latest.getMasterHookStates(),
latest.getCheckpointID(),
allowNonRestoredState,
LOG);
if (statsTracker != null) {
long restoreTimestamp = System.currentTimeMillis();
RestoredCheckpointStats restored = new RestoredCheckpointStats(
latest.getCheckpointID(),
latest.getProperties(),
restoreTimestamp,
latest.getExternalPointer());
statsTracker.reportRestoredCheckpoint(restored);
}
return true;
}
}
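// Illustrative usage (a hedged sketch, not part of the original coordinator; the
// "coordinator" and "vertices" references are hypothetical names):
//
//   Set<ExecutionJobVertex> vertices = ...;  // the job vertices to restore
//   boolean restored = coordinator.restoreLatestCheckpointedState(
//       vertices,
//       false,   // errorIfNoCheckpoint: fall back to a fresh start if none exists
//       false);  // allowNonRestoredState: reject state that maps to no job vertex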
/**
* Restore the state with given savepoint.
*
* @param savepointPointer The pointer to the savepoint.
* @param allowNonRestored True if allowing checkpoint state that cannot be
* mapped to any job vertex in tasks.
* @param tasks Map of job vertices to restore. State for these
* vertices is restored via
* {@link Execution}.
* @param userClassLoader The class loader to resolve serialized classes in
* legacy savepoint versions.
*/
public boolean restoreSavepoint(
String savepointPointer,
boolean allowNonRestored,
Map<JobVertexID, ExecutionJobVertex> tasks,
ClassLoader userClassLoader) throws Exception {
Preconditions.checkNotNull(savepointPointer, "The savepoint path cannot be null.");
LOG.info("Starting job {} from savepoint {} ({})",
job, savepointPointer, (allowNonRestored ? "allowing non restored state" : ""));
final CompletedCheckpointStorageLocation checkpointLocation = checkpointStorage.resolveCheckpoint(savepointPointer);
CompletedCheckpoint savepoint = Checkpoints.loadAndValidateCheckpoint(
job, tasks, checkpointLocation, userClassLoader, allowNonRestored);
completedCheckpointStore.addCheckpoint(savepoint);
long nextCheckpointId = savepoint.getCheckpointID() + 1;
checkpointIdCounter.setCount(nextCheckpointId);
LOG.info("Reset the checkpoint ID of job {} to {}.", job, nextCheckpointId);
return restoreLatestCheckpointedState(new HashSet<>(tasks.values()), true, allowNonRestored);
}
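// Illustrative usage (a hedged sketch; "coordinator", "vertices" and "classLoader" are
// hypothetical names and the savepoint path is an example value):
//
//   boolean restored = coordinator.restoreSavepoint(
//       "hdfs:///flink/savepoints/savepoint-ab12cd",  // resolved via the checkpoint storage
//       false,         // allowNonRestored: fail on state that maps to no job vertex
//       vertices,      // Map<JobVertexID, ExecutionJobVertex>
//       classLoader);  // resolves classes in legacy savepoint versions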
public int getNumberOfPendingCheckpoints() {
return this.pendingCheckpoints.size();
}
public int getNumberOfRetainedSuccessfulCheckpoints() {
synchronized (lock) {
return completedCheckpointStore.getNumberOfRetainedCheckpoints();
}
}
public Map<Long, PendingCheckpoint> getPendingCheckpoints() {
synchronized (lock) {
return new HashMap<>(this.pendingCheckpoints);
}
}
public List<CompletedCheckpoint> getSuccessfulCheckpoints() throws Exception {
synchronized (lock) {
return completedCheckpointStore.getAllCheckpoints();
}
}
public CheckpointStorageCoordinatorView getCheckpointStorage() {
return checkpointStorage;
}
public CompletedCheckpointStore getCheckpointStore() {
return completedCheckpointStore;
}
public long getCheckpointTimeout() {
return checkpointTimeout;
}
public ArrayDeque<CheckpointTriggerRequest> getTriggerRequestQueue() {
return triggerRequestQueue;
}
public boolean isTriggering() {
return isTriggering;
}
@VisibleForTesting
boolean isCurrentPeriodicTriggerAvailable() {
return currentPeriodicTrigger != null;
}
/**
* Returns whether periodic checkpointing has been configured.
*
* @return <code>true</code> if periodic checkpoints have been configured.
*/
public boolean isPeriodicCheckpointingConfigured() {
return baseInterval != Long.MAX_VALUE;
}
public void startCheckpointScheduler() {
synchronized (lock) {
if (shutdown) {
throw new IllegalArgumentException("Checkpoint coordinator is shut down");
}
stopCheckpointScheduler();
periodicScheduling = true;
currentPeriodicTrigger = scheduleTriggerWithDelay(getRandomInitDelay());
}
}
public void stopCheckpointScheduler() {
synchronized (lock) {
periodicTriggeringSuspended = false;
periodicScheduling = false;
if (currentPeriodicTrigger != null) {
currentPeriodicTrigger.cancel(false);
currentPeriodicTrigger = null;
}
final CheckpointException reason =
new CheckpointException(CheckpointFailureReason.CHECKPOINT_COORDINATOR_SUSPEND);
abortPendingAndQueuedCheckpoints(reason);
numUnsuccessfulCheckpointsTriggers.set(0);
}
}
/**
* Aborts all the pending checkpoints due to an exception.
* @param exception The exception.
*/
public void abortPendingCheckpoints(CheckpointException exception) {
synchronized (lock) {
abortPendingCheckpoints(ignored -> true, exception);
}
}
private void abortPendingCheckpoints(
Predicate<PendingCheckpoint> checkpointToFailPredicate,
CheckpointException exception) {
assert Thread.holdsLock(lock);
final PendingCheckpoint[] pendingCheckpointsToFail = pendingCheckpoints
.values()
.stream()
.filter(checkpointToFailPredicate)
.toArray(PendingCheckpoint[]::new);
for (PendingCheckpoint pendingCheckpoint : pendingCheckpointsToFail) {
abortPendingCheckpoint(pendingCheckpoint, exception);
}
}
/**
* If too many checkpoints are currently in progress, suspends periodic triggering and rejects this attempt so that the request is effectively queued.
*
* @throws CheckpointException If too many checkpoints are currently in progress.
*/
private void checkConcurrentCheckpoints() throws CheckpointException {
if (pendingCheckpoints.size() >= maxConcurrentCheckpointAttempts) {
periodicTriggeringSuspended = true;
if (currentPeriodicTrigger != null) {
currentPeriodicTrigger.cancel(false);
currentPeriodicTrigger = null;
}
throw new CheckpointException(CheckpointFailureReason.TOO_MANY_CONCURRENT_CHECKPOINTS);
}
}
/**
* Make sure the minimum interval between checkpoints has passed.
*
* @throws CheckpointException If the minimum interval between checkpoints has not passed.
*/
private void checkMinPauseBetweenCheckpoints() throws CheckpointException {
final long nextCheckpointTriggerRelativeTime =
lastCheckpointCompletionRelativeTime + minPauseBetweenCheckpoints;
final long durationTillNextMillis =
nextCheckpointTriggerRelativeTime - clock.relativeTimeMillis();
if (durationTillNextMillis > 0) {
if (currentPeriodicTrigger != null) {
currentPeriodicTrigger.cancel(false);
currentPeriodicTrigger = null;
}
currentPeriodicTrigger = scheduleTriggerWithDelay(durationTillNextMillis);
throw new CheckpointException(CheckpointFailureReason.MINIMUM_TIME_BETWEEN_CHECKPOINTS);
}
}
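// Worked example (illustrative numbers only): with minPauseBetweenCheckpoints = 30_000 ms,
// lastCheckpointCompletionRelativeTime = 100_000 ms and clock.relativeTimeMillis() = 110_000 ms,
// nextCheckpointTriggerRelativeTime = 130_000 ms and durationTillNextMillis = 20_000 ms, so the
// periodic trigger is rescheduled to fire in 20 seconds and this attempt is aborted with
// MINIMUM_TIME_BETWEEN_CHECKPOINTS.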
private long getRandomInitDelay() {
return ThreadLocalRandom.current().nextLong(minPauseBetweenCheckpoints, baseInterval + 1L);
}
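// Hedged note: the initial delay is drawn uniformly from [minPauseBetweenCheckpoints,
// baseInterval] (the bound passed to nextLong() is exclusive), so the first periodic
// checkpoint starts at a randomized offset while still respecting the minimum pause.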
private ScheduledFuture<?> scheduleTriggerWithDelay(long initDelay) {
return timer.scheduleAtFixedRate(
new ScheduledTrigger(),
initDelay, baseInterval, TimeUnit.MILLISECONDS);
}
public JobStatusListener createActivatorDeactivator() {
synchronized (lock) {
if (shutdown) {
throw new IllegalArgumentException("Checkpoint coordinator is shut down");
}
if (jobStatusListener == null) {
jobStatusListener = new CheckpointCoordinatorDeActivator(this);
}
return jobStatusListener;
}
}
private final class ScheduledTrigger implements Runnable {
@Override
public void run() {
try {
triggerCheckpoint(System.currentTimeMillis(), true);
}
catch (Exception e) {
LOG.error("Exception while triggering checkpoint for job {}.", job, e);
}
}
}
/**
* Discards the given state object asynchronously belonging to the given job, execution attempt
* id and checkpoint id.
*
* @param jobId identifying the job to which the state object belongs
* @param executionAttemptID identifying the task to which the state object belongs
* @param checkpointId of the state object
* @param subtaskState to discard asynchronously
*/
private void discardSubtaskState(
final JobID jobId,
final ExecutionAttemptID executionAttemptID,
final long checkpointId,
final TaskStateSnapshot subtaskState) {
if (subtaskState != null) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
subtaskState.discardState();
} catch (Throwable t2) {
LOG.warn("Could not properly discard state object of checkpoint {} " +
"belonging to task {} of job {}.", checkpointId, executionAttemptID, jobId, t2);
}
}
});
}
}
private void abortPendingCheckpoint(
PendingCheckpoint pendingCheckpoint,
CheckpointException exception) {
abortPendingCheckpoint(pendingCheckpoint, exception, null);
}
private void abortPendingCheckpoint(
PendingCheckpoint pendingCheckpoint,
CheckpointException exception,
@Nullable final ExecutionAttemptID executionAttemptID) {
assert(Thread.holdsLock(lock));
if (!pendingCheckpoint.isDiscarded()) {
try {
pendingCheckpoint.abort(
exception.getCheckpointFailureReason(), exception.getCause());
if (pendingCheckpoint.getProps().isSavepoint() &&
pendingCheckpoint.getProps().isSynchronous()) {
failureManager.handleSynchronousSavepointFailure(exception);
} else if (executionAttemptID != null) {
failureManager.handleTaskLevelCheckpointException(
exception, pendingCheckpoint.getCheckpointId(), executionAttemptID);
} else {
failureManager.handleJobLevelCheckpointException(
exception, pendingCheckpoint.getCheckpointId());
}
} finally {
pendingCheckpoints.remove(pendingCheckpoint.getCheckpointId());
rememberRecentCheckpointId(pendingCheckpoint.getCheckpointId());
resumePeriodicTriggering();
}
}
}
private void preCheckBeforeTriggeringCheckpoint(boolean isPeriodic, boolean forceCheckpoint) throws CheckpointException {
preCheckGlobalState(isPeriodic);
if (!forceCheckpoint) {
checkConcurrentCheckpoints();
checkMinPauseBetweenCheckpoints();
}
}
private void preCheckGlobalState(boolean isPeriodic) throws CheckpointException {
if (shutdown) {
throw new CheckpointException(CheckpointFailureReason.CHECKPOINT_COORDINATOR_SHUTDOWN);
}
if (isPeriodic && !periodicScheduling) {
throw new CheckpointException(CheckpointFailureReason.PERIODIC_SCHEDULER_SHUTDOWN);
}
}
/**
* Check if all tasks that we need to trigger are running. If not, abort the checkpoint.
*
* @return the executions that need to be triggered.
* @throws CheckpointException if not all tasks to trigger are running
*/
private Execution[] getTriggerExecutions() throws CheckpointException {
Execution[] executions = new Execution[tasksToTrigger.length];
for (int i = 0; i < tasksToTrigger.length; i++) {
Execution ee = tasksToTrigger[i].getCurrentExecutionAttempt();
if (ee == null) {
LOG.info(
"Checkpoint triggering task {} of job {} is not being executed at the moment. Aborting checkpoint.",
tasksToTrigger[i].getTaskNameWithSubtaskIndex(),
job);
throw new CheckpointException(
CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
} else if (ee.getState() == ExecutionState.RUNNING) {
executions[i] = ee;
} else {
LOG.info(
"Checkpoint triggering task {} of job {} is not in state {} but {} instead. Aborting checkpoint.",
tasksToTrigger[i].getTaskNameWithSubtaskIndex(),
job,
ExecutionState.RUNNING,
ee.getState());
throw new CheckpointException(
CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
}
}
return executions;
}
/**
* Check if all tasks that need to acknowledge the checkpoint are running.
* If not, abort the checkpoint.
*
* @return the execution vertices which should give an ack response
* @throws CheckpointException if not all tasks to acknowledge are running
*/
private Map<ExecutionAttemptID, ExecutionVertex> getAckTasks() throws CheckpointException {
Map<ExecutionAttemptID, ExecutionVertex> ackTasks = new HashMap<>(tasksToWaitFor.length);
for (ExecutionVertex ev : tasksToWaitFor) {
Execution ee = ev.getCurrentExecutionAttempt();
if (ee != null) {
ackTasks.put(ee.getAttemptId(), ev);
} else {
LOG.info(
"Checkpoint acknowledging task {} of job {} is not being executed at the moment. Aborting checkpoint.",
ev.getTaskNameWithSubtaskIndex(),
job);
throw new CheckpointException(
CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
}
}
return ackTasks;
}
private void abortPendingAndQueuedCheckpoints(CheckpointException exception) {
assert(Thread.holdsLock(lock));
CheckpointTriggerRequest request;
while ((request = triggerRequestQueue.poll()) != null) {
request.onCompletionPromise.completeExceptionally(exception);
}
abortPendingCheckpoints(exception);
}
/**
* The canceller of a checkpoint. Cancels the checkpoint if it does not finish within the
* configured timeout.
*/
private class CheckpointCanceller implements Runnable {
private final PendingCheckpoint pendingCheckpoint;
private CheckpointCanceller(PendingCheckpoint pendingCheckpoint) {
this.pendingCheckpoint = checkNotNull(pendingCheckpoint);
}
@Override
public void run() {
synchronized (lock) {
if (!pendingCheckpoint.isDiscarded()) {
LOG.info("Checkpoint {} of job {} expired before completing.",
pendingCheckpoint.getCheckpointId(), job);
abortPendingCheckpoint(
pendingCheckpoint,
new CheckpointException(CheckpointFailureReason.CHECKPOINT_EXPIRED));
}
}
}
}
private static CheckpointException getCheckpointException(
CheckpointFailureReason defaultReason, Throwable throwable) {
final Optional<CheckpointException> checkpointExceptionOptional =
ExceptionUtils.findThrowable(throwable, CheckpointException.class);
return checkpointExceptionOptional
.orElseGet(() -> new CheckpointException(defaultReason, throwable));
}
private static class CheckpointIdAndStorageLocation {
private final long checkpointId;
private final CheckpointStorageLocation checkpointStorageLocation;
CheckpointIdAndStorageLocation(
long checkpointId,
CheckpointStorageLocation checkpointStorageLocation) {
this.checkpointId = checkpointId;
this.checkpointStorageLocation = checkNotNull(checkpointStorageLocation);
}
}
private static class CheckpointTriggerRequest {
private final long timestamp;
private final CheckpointProperties props;
private final @Nullable String externalSavepointLocation;
private final boolean isPeriodic;
private final boolean advanceToEndOfTime;
private final CompletableFuture<CompletedCheckpoint> onCompletionPromise;
CheckpointTriggerRequest(
long timestamp,
CheckpointProperties props,
@Nullable String externalSavepointLocation,
boolean isPeriodic,
boolean advanceToEndOfTime,
CompletableFuture<CompletedCheckpoint> onCompletionPromise) {
this.timestamp = timestamp;
this.props = checkNotNull(props);
this.externalSavepointLocation = externalSavepointLocation;
this.isPeriodic = isPeriodic;
this.advanceToEndOfTime = advanceToEndOfTime;
this.onCompletionPromise = checkNotNull(onCompletionPromise);
}
}
}
|
class CheckpointCoordinator {
private static final Logger LOG = LoggerFactory.getLogger(CheckpointCoordinator.class);
/** The number of recent checkpoints whose IDs are remembered. */
private static final int NUM_GHOST_CHECKPOINT_IDS = 16;
/** Coordinator-wide lock to safeguard the checkpoint updates. */
private final Object lock = new Object();
/** The job whose checkpoint this coordinator coordinates. */
private final JobID job;
/** Default checkpoint properties. **/
private final CheckpointProperties checkpointProperties;
/** The executor used for asynchronous calls, like potentially blocking I/O. */
private final Executor executor;
/** Tasks who need to be sent a message when a checkpoint is started. */
private final ExecutionVertex[] tasksToTrigger;
/** Tasks who need to acknowledge a checkpoint before it succeeds. */
private final ExecutionVertex[] tasksToWaitFor;
/** Tasks who need to be sent a message when a checkpoint is confirmed. */
private final ExecutionVertex[] tasksToCommitTo;
/** Map from checkpoint ID to the pending checkpoint. */
private final Map<Long, PendingCheckpoint> pendingCheckpoints;
/** Completed checkpoints. Implementations can be blocking. Make sure calls to methods
* accessing this don't block the job manager actor and run asynchronously. */
private final CompletedCheckpointStore completedCheckpointStore;
/** The root checkpoint state backend, which is responsible for initializing the
* checkpoint, storing the metadata, and cleaning up the checkpoint. */
private final CheckpointStorageCoordinatorView checkpointStorage;
/** A list of recent checkpoint IDs, to identify late messages (vs invalid ones). */
private final ArrayDeque<Long> recentPendingCheckpoints;
/** Checkpoint ID counter to ensure ascending IDs. In case of job manager failures, these
* need to be ascending across job managers. */
private final CheckpointIDCounter checkpointIdCounter;
/** The base checkpoint interval. Actual trigger time may be affected by the
* max concurrent checkpoints and minimum-pause values */
private final long baseInterval;
/** The max time (in ms) that a checkpoint may take. */
private final long checkpointTimeout;
/** The min time (in ms) to delay after a checkpoint could be triggered. Allows enforcing
* a minimum processing time between checkpoint attempts. */
private final long minPauseBetweenCheckpoints;
/** The maximum number of checkpoints that may be in progress at the same time. */
private final int maxConcurrentCheckpointAttempts;
/** The timer that handles the checkpoint timeouts and triggers periodic checkpoints.
* It must be single-threaded. Eventually it will be replaced by main thread executor. */
private final ScheduledExecutor timer;
/** The master checkpoint hooks executed by this checkpoint coordinator. */
private final HashMap<String, MasterTriggerRestoreHook<?>> masterHooks;
/** Actor that receives status updates from the execution graph this coordinator works for. */
private JobStatusListener jobStatusListener;
/** The number of consecutive failed trigger attempts. */
private final AtomicInteger numUnsuccessfulCheckpointsTriggers = new AtomicInteger(0);
/** A handle to the current periodic trigger, to cancel it when necessary. */
private ScheduledFuture<?> currentPeriodicTrigger;
/** The timestamp (via {@link Clock#relativeTimeMillis()}) when the last checkpoint completed. */
private long lastCheckpointCompletionRelativeTime;
/** Flag whether a triggered checkpoint should immediately schedule the next checkpoint.
* Non-volatile, because only accessed in synchronized scope */
private boolean periodicScheduling;
/** Flag whether periodic triggering is suspended (too many concurrent pending checkpoints).
* Non-volatile, because only accessed in synchronized scope */
private boolean periodicTriggeringSuspended;
/** Flag marking the coordinator as shut down (not accepting any messages any more). */
private volatile boolean shutdown;
/** Optional tracker for checkpoint statistics. */
@Nullable
private CheckpointStatsTracker statsTracker;
/** A factory for SharedStateRegistry objects. */
private final SharedStateRegistryFactory sharedStateRegistryFactory;
/** Registry that tracks state which is shared across (incremental) checkpoints. */
private SharedStateRegistry sharedStateRegistry;
private boolean isPreferCheckpointForRecovery;
private final CheckpointFailureManager failureManager;
private final Clock clock;
/** Flag indicating whether there is an in-flight trigger request. */
private boolean isTriggering = false;
/** A queue to cache those trigger requests which can't be triggered immediately. */
private final ArrayDeque<CheckpointTriggerRequest> triggerRequestQueue;
public CheckpointCoordinator(
JobID job,
CheckpointCoordinatorConfiguration chkConfig,
ExecutionVertex[] tasksToTrigger,
ExecutionVertex[] tasksToWaitFor,
ExecutionVertex[] tasksToCommitTo,
CheckpointIDCounter checkpointIDCounter,
CompletedCheckpointStore completedCheckpointStore,
StateBackend checkpointStateBackend,
Executor executor,
ScheduledExecutor timer,
SharedStateRegistryFactory sharedStateRegistryFactory,
CheckpointFailureManager failureManager) {
this(
job,
chkConfig,
tasksToTrigger,
tasksToWaitFor,
tasksToCommitTo,
checkpointIDCounter,
completedCheckpointStore,
checkpointStateBackend,
executor,
timer,
sharedStateRegistryFactory,
failureManager,
SystemClock.getInstance());
}
@VisibleForTesting
public CheckpointCoordinator(
JobID job,
CheckpointCoordinatorConfiguration chkConfig,
ExecutionVertex[] tasksToTrigger,
ExecutionVertex[] tasksToWaitFor,
ExecutionVertex[] tasksToCommitTo,
CheckpointIDCounter checkpointIDCounter,
CompletedCheckpointStore completedCheckpointStore,
StateBackend checkpointStateBackend,
Executor executor,
ScheduledExecutor timer,
SharedStateRegistryFactory sharedStateRegistryFactory,
CheckpointFailureManager failureManager,
Clock clock) {
checkNotNull(checkpointStateBackend);
long minPauseBetweenCheckpoints = chkConfig.getMinPauseBetweenCheckpoints();
if (minPauseBetweenCheckpoints > 365L * 24 * 60 * 60 * 1_000) {
minPauseBetweenCheckpoints = 365L * 24 * 60 * 60 * 1_000;
}
long baseInterval = chkConfig.getCheckpointInterval();
if (baseInterval < minPauseBetweenCheckpoints) {
baseInterval = minPauseBetweenCheckpoints;
}
this.job = checkNotNull(job);
this.baseInterval = baseInterval;
this.checkpointTimeout = chkConfig.getCheckpointTimeout();
this.minPauseBetweenCheckpoints = minPauseBetweenCheckpoints;
this.maxConcurrentCheckpointAttempts = chkConfig.getMaxConcurrentCheckpoints();
this.tasksToTrigger = checkNotNull(tasksToTrigger);
this.tasksToWaitFor = checkNotNull(tasksToWaitFor);
this.tasksToCommitTo = checkNotNull(tasksToCommitTo);
this.pendingCheckpoints = new LinkedHashMap<>();
this.checkpointIdCounter = checkNotNull(checkpointIDCounter);
this.completedCheckpointStore = checkNotNull(completedCheckpointStore);
this.executor = checkNotNull(executor);
this.sharedStateRegistryFactory = checkNotNull(sharedStateRegistryFactory);
this.sharedStateRegistry = sharedStateRegistryFactory.create(executor);
this.isPreferCheckpointForRecovery = chkConfig.isPreferCheckpointForRecovery();
this.failureManager = checkNotNull(failureManager);
this.clock = checkNotNull(clock);
this.recentPendingCheckpoints = new ArrayDeque<>(NUM_GHOST_CHECKPOINT_IDS);
this.masterHooks = new HashMap<>();
this.triggerRequestQueue = new ArrayDeque<>();
this.timer = timer;
this.checkpointProperties = CheckpointProperties.forCheckpoint(chkConfig.getCheckpointRetentionPolicy());
try {
this.checkpointStorage = checkpointStateBackend.createCheckpointStorage(job);
checkpointStorage.initializeBaseLocations();
} catch (IOException e) {
throw new FlinkRuntimeException("Failed to create checkpoint storage at checkpoint coordinator side.", e);
}
try {
checkpointIDCounter.start();
} catch (Throwable t) {
throw new RuntimeException("Failed to start checkpoint ID counter: " + t.getMessage(), t);
}
}
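// Worked example (illustrative numbers only): with chkConfig.getMinPauseBetweenCheckpoints()
// = 60_000 ms and chkConfig.getCheckpointInterval() = 10_000 ms, baseInterval is raised to
// 60_000 ms above, so periodic checkpoints are never scheduled more often than the minimum
// pause allows; a pause larger than one year is capped to keep the delay arithmetic safe.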
/**
* Adds the given master hook to the checkpoint coordinator. This method does nothing if
* the checkpoint coordinator already contains a hook with the same ID (as defined via
* {@link MasterTriggerRestoreHook#getIdentifier()}).
*
* @param hook The hook to add.
* @return True, if the hook was added, false if the checkpoint coordinator already
* contained a hook with the same ID.
*/
public boolean addMasterHook(MasterTriggerRestoreHook<?> hook) {
checkNotNull(hook);
final String id = hook.getIdentifier();
checkArgument(!StringUtils.isNullOrWhitespaceOnly(id), "The hook has a null or empty id");
synchronized (lock) {
if (!masterHooks.containsKey(id)) {
masterHooks.put(id, hook);
return true;
}
else {
return false;
}
}
}
/**
* Gets the number of currently registered master hooks.
*/
public int getNumberOfRegisteredMasterHooks() {
synchronized (lock) {
return masterHooks.size();
}
}
/**
* Sets the checkpoint stats tracker.
*
* @param statsTracker The checkpoint stats tracker.
*/
public void setCheckpointStatsTracker(@Nullable CheckpointStatsTracker statsTracker) {
this.statsTracker = statsTracker;
}
/**
* Shuts down the checkpoint coordinator.
*
* <p>After this method has been called, the coordinator does not accept
* any further messages and cannot trigger any further checkpoints.
*/
public void shutdown(JobStatus jobStatus) throws Exception {
synchronized (lock) {
if (!shutdown) {
shutdown = true;
LOG.info("Stopping checkpoint coordinator for job {}.", job);
periodicScheduling = false;
periodicTriggeringSuspended = false;
MasterHooks.close(masterHooks.values(), LOG);
masterHooks.clear();
final CheckpointException reason = new CheckpointException(
CheckpointFailureReason.CHECKPOINT_COORDINATOR_SHUTDOWN);
abortPendingAndQueuedCheckpoints(reason);
completedCheckpointStore.shutdown(jobStatus);
checkpointIdCounter.shutdown(jobStatus);
}
}
}
public boolean isShutdown() {
return shutdown;
}
/**
* Triggers a savepoint with the given savepoint directory as a target.
*
* @param timestamp The timestamp for the savepoint.
* @param targetLocation Target location for the savepoint, optional. If null, the
* state backend's configured default will be used.
* @return A future to the completed checkpoint
* @throws IllegalStateException If no savepoint directory has been
* specified and no default savepoint directory has been
* configured
*/
public CompletableFuture<CompletedCheckpoint> triggerSavepoint(
final long timestamp,
@Nullable final String targetLocation) {
final CheckpointProperties properties = CheckpointProperties.forSavepoint();
return triggerSavepointInternal(timestamp, properties, false, targetLocation);
}
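// Illustrative usage (a hedged sketch; "coordinator" is a hypothetical reference and the
// target directory is an example value):
//
//   CompletableFuture<CompletedCheckpoint> future = coordinator.triggerSavepoint(
//       System.currentTimeMillis(), "s3://my-bucket/flink-savepoints");
//   future.whenComplete((savepoint, failure) -> {
//       if (failure == null) {
//           String path = savepoint.getExternalPointer();  // where the savepoint was written
//       }
//   });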
/**
* Triggers a synchronous savepoint with the given savepoint directory as a target.
*
* @param timestamp The timestamp for the savepoint.
* @param advanceToEndOfEventTime Flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
* to fire any registered event-time timers.
* @param targetLocation Target location for the savepoint, optional. If null, the
* state backend's configured default will be used.
* @return A future to the completed checkpoint
* @throws IllegalStateException If no savepoint directory has been
* specified and no default savepoint directory has been
* configured
*/
public CompletableFuture<CompletedCheckpoint> triggerSynchronousSavepoint(
final long timestamp,
final boolean advanceToEndOfEventTime,
@Nullable final String targetLocation) {
final CheckpointProperties properties = CheckpointProperties.forSyncSavepoint();
return triggerSavepointInternal(timestamp, properties, advanceToEndOfEventTime, targetLocation);
}
private CompletableFuture<CompletedCheckpoint> triggerSavepointInternal(
final long timestamp,
final CheckpointProperties checkpointProperties,
final boolean advanceToEndOfEventTime,
@Nullable final String targetLocation) {
checkNotNull(checkpointProperties);
final CompletableFuture<CompletedCheckpoint> resultFuture = new CompletableFuture<>();
timer.execute(() -> triggerCheckpoint(
timestamp,
checkpointProperties,
targetLocation,
false,
advanceToEndOfEventTime)
.whenComplete((completedCheckpoint, throwable) -> {
if (throwable == null) {
resultFuture.complete(completedCheckpoint);
} else {
resultFuture.completeExceptionally(throwable);
}
}));
return resultFuture;
}
/**
* Triggers a new standard checkpoint and uses the given timestamp as the checkpoint
* timestamp. The return value is a future that completes when the triggered checkpoint
* finishes or an error occurs.
*
* @param timestamp The timestamp for the checkpoint.
* @param isPeriodic Flag indicating whether this triggered checkpoint is
* periodic. If this flag is true, but the periodic scheduler is disabled,
* the checkpoint will be declined.
* @return a future to the completed checkpoint.
*/
public CompletableFuture<CompletedCheckpoint> triggerCheckpoint(long timestamp, boolean isPeriodic) {
return triggerCheckpoint(timestamp, checkpointProperties, null, isPeriodic, false);
}
@VisibleForTesting
public CompletableFuture<CompletedCheckpoint> triggerCheckpoint(
long timestamp,
CheckpointProperties props,
@Nullable String externalSavepointLocation,
boolean isPeriodic,
boolean advanceToEndOfTime) {
if (advanceToEndOfTime && !(props.isSynchronous() && props.isSavepoint())) {
return FutureUtils.completedExceptionally(new IllegalArgumentException(
"Only synchronous savepoints are allowed to advance the watermark to MAX."));
}
final CompletableFuture<CompletedCheckpoint> onCompletionPromise =
new CompletableFuture<>();
synchronized (lock) {
if (isTriggering || !triggerRequestQueue.isEmpty()) {
triggerRequestQueue.add(new CheckpointTriggerRequest(
timestamp,
props,
externalSavepointLocation,
isPeriodic,
advanceToEndOfTime,
onCompletionPromise));
return onCompletionPromise;
}
}
startTriggeringCheckpoint(
timestamp,
props,
externalSavepointLocation,
isPeriodic,
advanceToEndOfTime,
onCompletionPromise);
return onCompletionPromise;
}
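// Hedged note on the hand-off above: if a trigger is already in flight (isTriggering) or
// earlier requests are queued, the new request is appended to triggerRequestQueue and later
// served in FIFO order by checkQueuedCheckpointTriggerRequest(); otherwise triggering starts
// immediately, outside the lock.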
private void startTriggeringCheckpoint(
long timestamp,
CheckpointProperties props,
@Nullable String externalSavepointLocation,
boolean isPeriodic,
boolean advanceToEndOfTime,
CompletableFuture<CompletedCheckpoint> onCompletionPromise) {
try {
synchronized (lock) {
preCheckBeforeTriggeringCheckpoint(isPeriodic, props.forceCheckpoint());
}
final Execution[] executions = getTriggerExecutions();
final Map<ExecutionAttemptID, ExecutionVertex> ackTasks = getAckTasks();
Preconditions.checkState(!isTriggering);
isTriggering = true;
final CompletableFuture<PendingCheckpoint> pendingCheckpointCompletableFuture =
initializeCheckpoint(props, externalSavepointLocation)
.thenApplyAsync(
(checkpointIdAndStorageLocation) -> createPendingCheckpoint(
timestamp,
props,
ackTasks,
isPeriodic,
checkpointIdAndStorageLocation.checkpointId,
checkpointIdAndStorageLocation.checkpointStorageLocation,
onCompletionPromise),
timer);
pendingCheckpointCompletableFuture
.thenCompose(this::snapshotMasterState)
.whenCompleteAsync(
(ignored, throwable) -> {
final PendingCheckpoint checkpoint =
FutureUtils.getWithoutException(pendingCheckpointCompletableFuture);
if (throwable == null && checkpoint != null && !checkpoint.isDiscarded()) {
snapshotTaskState(
timestamp,
checkpoint.getCheckpointId(),
checkpoint.getCheckpointStorageLocation(),
props,
executions,
advanceToEndOfTime);
onTriggerSuccess();
} else {
if (checkpoint == null) {
onTriggerFailure(onCompletionPromise, throwable);
} else {
onTriggerFailure(checkpoint, throwable);
}
}
},
timer);
} catch (Throwable throwable) {
onTriggerFailure(onCompletionPromise, throwable);
}
}
/**
* Initializes the checkpoint (id and storage location) asynchronously. It is executed on the
* I/O executor because it might be time-consuming.
*
* @param props checkpoint properties
* @param externalSavepointLocation the external savepoint location; may be null
* @return a future holding the checkpoint id and the initialized checkpoint storage location
*/
private CompletableFuture<CheckpointIdAndStorageLocation> initializeCheckpoint(
CheckpointProperties props,
@Nullable String externalSavepointLocation) {
return CompletableFuture.supplyAsync(() -> {
try {
long checkpointID = checkpointIdCounter.getAndIncrement();
CheckpointStorageLocation checkpointStorageLocation = props.isSavepoint() ?
checkpointStorage
.initializeLocationForSavepoint(checkpointID, externalSavepointLocation) :
checkpointStorage.initializeLocationForCheckpoint(checkpointID);
return new CheckpointIdAndStorageLocation(checkpointID, checkpointStorageLocation);
} catch (Throwable throwable) {
throw new CompletionException(throwable);
}
}, executor);
}
private PendingCheckpoint createPendingCheckpoint(
long timestamp,
CheckpointProperties props,
Map<ExecutionAttemptID, ExecutionVertex> ackTasks,
boolean isPeriodic,
long checkpointID,
CheckpointStorageLocation checkpointStorageLocation,
CompletableFuture<CompletedCheckpoint> onCompletionPromise) {
synchronized (lock) {
try {
preCheckGlobalState(isPeriodic);
} catch (Throwable t) {
throw new CompletionException(t);
}
}
final PendingCheckpoint checkpoint = new PendingCheckpoint(
job,
checkpointID,
timestamp,
ackTasks,
masterHooks.keySet(),
props,
checkpointStorageLocation,
executor,
onCompletionPromise);
if (statsTracker != null) {
PendingCheckpointStats callback = statsTracker.reportPendingCheckpoint(
checkpointID,
timestamp,
props);
checkpoint.setStatsCallback(callback);
}
synchronized (lock) {
pendingCheckpoints.put(checkpointID, checkpoint);
ScheduledFuture<?> cancellerHandle = timer.schedule(
new CheckpointCanceller(checkpoint),
checkpointTimeout, TimeUnit.MILLISECONDS);
if (!checkpoint.setCancellerHandle(cancellerHandle)) {
cancellerHandle.cancel(false);
}
}
LOG.info("Triggering checkpoint {} @ {} for job {}.", checkpointID, timestamp, job);
return checkpoint;
}
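// Illustrative timing (a hedged note; the number is an example value): with
// checkpointTimeout = 600_000 ms, the CheckpointCanceller scheduled above fires ten minutes
// after triggering and aborts the checkpoint with CHECKPOINT_EXPIRED, unless the checkpoint
// completed or was discarded first (the canceller re-checks isDiscarded() under the lock).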
/**
* Snapshot master hook states asynchronously.
*
* @param checkpoint the pending checkpoint
* @return a future that completes once all master hook states have been acknowledged
*/
private CompletableFuture<Void> snapshotMasterState(PendingCheckpoint checkpoint) {
if (masterHooks.isEmpty()) {
return CompletableFuture.completedFuture(null);
}
final long checkpointID = checkpoint.getCheckpointId();
final long timestamp = checkpoint.getCheckpointTimestamp();
final CompletableFuture<Void> masterStateCompletableFuture = new CompletableFuture<>();
for (MasterTriggerRestoreHook<?> masterHook : masterHooks.values()) {
MasterHooks
.triggerHook(masterHook, checkpointID, timestamp, executor)
.whenCompleteAsync(
(masterState, throwable) -> {
try {
synchronized (lock) {
if (masterStateCompletableFuture.isDone()) {
return;
}
if (checkpoint.isDiscarded()) {
throw new IllegalStateException(
"Checkpoint " + checkpointID + " has been discarded");
}
if (throwable == null) {
checkpoint.acknowledgeMasterState(
masterHook.getIdentifier(), masterState);
if (checkpoint.areMasterStatesFullyAcknowledged()) {
masterStateCompletableFuture.complete(null);
}
} else {
masterStateCompletableFuture.completeExceptionally(throwable);
}
}
} catch (Throwable t) {
masterStateCompletableFuture.completeExceptionally(t);
}
},
timer);
}
return masterStateCompletableFuture;
}
/**
* Snapshot task state.
*
* @param timestamp the timestamp of this checkpoint request
* @param checkpointID the checkpoint id
* @param checkpointStorageLocation the checkpoint location
* @param props the checkpoint properties
* @param executions the executions which should be triggered
* @param advanceToEndOfTime Flag indicating if the source should inject a {@code MAX_WATERMARK}
* in the pipeline to fire any registered event-time timers.
*/
private void snapshotTaskState(
long timestamp,
long checkpointID,
CheckpointStorageLocation checkpointStorageLocation,
CheckpointProperties props,
Execution[] executions,
boolean advanceToEndOfTime) {
final CheckpointOptions checkpointOptions = new CheckpointOptions(
props.getCheckpointType(),
checkpointStorageLocation.getLocationReference());
for (Execution execution: executions) {
if (props.isSynchronous()) {
execution.triggerSynchronousSavepoint(checkpointID, timestamp, checkpointOptions, advanceToEndOfTime);
} else {
execution.triggerCheckpoint(checkpointID, timestamp, checkpointOptions);
}
}
}
/**
* The trigger request succeeded.
* NOTE: this must be invoked whenever a trigger request succeeds.
*/
private void onTriggerSuccess() {
isTriggering = false;
numUnsuccessfulCheckpointsTriggers.set(0);
checkQueuedCheckpointTriggerRequest();
}
/**
* The trigger request failed prematurely, before proper initialization.
* There are no resources to release, but the completion promise must be failed manually here.
*
* @param onCompletionPromise the completion promise of the checkpoint/savepoint
* @param throwable the reason of trigger failure
*/
private void onTriggerFailure(
CompletableFuture<CompletedCheckpoint> onCompletionPromise, Throwable throwable) {
final CheckpointException checkpointException =
getCheckpointException(CheckpointFailureReason.TRIGGER_CHECKPOINT_FAILURE, throwable);
onCompletionPromise.completeExceptionally(checkpointException);
onTriggerFailure((PendingCheckpoint) null, checkpointException);
}
/**
* The trigger request failed.
* NOTE: this must be invoked whenever a trigger request fails.
*
* @param checkpoint the pending checkpoint which is failed. It could be null if it's failed
* prematurely without a proper initialization.
* @param throwable the reason of trigger failure
*/
/**
* Checks whether there is a trigger request queued. Consumes it if there is one.
* NOTE: this must be called after each triggering
*/
private void checkQueuedCheckpointTriggerRequest() {
synchronized (lock) {
if (triggerRequestQueue.isEmpty()) {
return;
}
}
final CheckpointTriggerRequest request;
synchronized (lock) {
request = triggerRequestQueue.poll();
}
if (request != null) {
startTriggeringCheckpoint(
request.timestamp,
request.props,
request.externalSavepointLocation,
request.isPeriodic,
request.advanceToEndOfTime,
request.onCompletionPromise);
}
}
/**
* Receives a {@link DeclineCheckpoint} message for a pending checkpoint.
*
* @param message Checkpoint decline from the task manager
* @param taskManagerLocationInfo The location info of the decline checkpoint message's sender
*/
public void receiveDeclineMessage(DeclineCheckpoint message, String taskManagerLocationInfo) {
if (shutdown || message == null) {
return;
}
if (!job.equals(message.getJob())) {
throw new IllegalArgumentException("Received DeclineCheckpoint message for job " +
message.getJob() + " from " + taskManagerLocationInfo + " while this coordinator handles job " + job);
}
final long checkpointId = message.getCheckpointId();
final String reason = (message.getReason() != null ? message.getReason().getMessage() : "");
PendingCheckpoint checkpoint;
synchronized (lock) {
if (shutdown) {
return;
}
checkpoint = pendingCheckpoints.get(checkpointId);
if (checkpoint != null) {
Preconditions.checkState(
!checkpoint.isDiscarded(),
"Received message for discarded but non-removed checkpoint " + checkpointId);
LOG.info("Decline checkpoint {} by task {} of job {} at {}.",
checkpointId,
message.getTaskExecutionId(),
job,
taskManagerLocationInfo);
final CheckpointException checkpointException;
if (message.getReason() == null) {
checkpointException =
new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED);
} else {
checkpointException = getCheckpointException(
CheckpointFailureReason.JOB_FAILURE, message.getReason());
}
abortPendingCheckpoint(
checkpoint,
checkpointException,
message.getTaskExecutionId());
} else if (LOG.isDebugEnabled()) {
if (recentPendingCheckpoints.contains(checkpointId)) {
LOG.debug("Received another decline message for now expired checkpoint attempt {} from task {} of job {} at {} : {}",
checkpointId, message.getTaskExecutionId(), job, taskManagerLocationInfo, reason);
} else {
LOG.debug("Received decline message for unknown (too old?) checkpoint attempt {} from task {} of job {} at {} : {}",
checkpointId, message.getTaskExecutionId(), job, taskManagerLocationInfo, reason);
}
}
}
}
/**
* Receives an AcknowledgeCheckpoint message and returns whether the
* message was associated with a pending checkpoint.
*
* @param message Checkpoint ack from the task manager
*
* @param taskManagerLocationInfo The location of the acknowledge checkpoint message's sender
* @return Flag indicating whether the ack'd checkpoint was associated
* with a pending checkpoint.
*
* @throws CheckpointException If the checkpoint cannot be added to the completed checkpoint store.
*/
public boolean receiveAcknowledgeMessage(AcknowledgeCheckpoint message, String taskManagerLocationInfo) throws CheckpointException {
if (shutdown || message == null) {
return false;
}
if (!job.equals(message.getJob())) {
LOG.error("Received wrong AcknowledgeCheckpoint message for job {} from {} : {}", job, taskManagerLocationInfo, message);
return false;
}
final long checkpointId = message.getCheckpointId();
synchronized (lock) {
if (shutdown) {
return false;
}
final PendingCheckpoint checkpoint = pendingCheckpoints.get(checkpointId);
if (checkpoint != null && !checkpoint.isDiscarded()) {
switch (checkpoint.acknowledgeTask(message.getTaskExecutionId(), message.getSubtaskState(), message.getCheckpointMetrics())) {
case SUCCESS:
LOG.debug("Received acknowledge message for checkpoint {} from task {} of job {} at {}.",
checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
if (checkpoint.areTasksFullyAcknowledged()) {
completePendingCheckpoint(checkpoint);
}
break;
case DUPLICATE:
LOG.debug("Received a duplicate acknowledge message for checkpoint {}, task {}, job {}, location {}.",
message.getCheckpointId(), message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
break;
case UNKNOWN:
LOG.warn("Could not acknowledge the checkpoint {} for task {} of job {} at {}, " +
"because the task's execution attempt id was unknown. Discarding " +
"the state handle to avoid lingering state.", message.getCheckpointId(),
message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
discardSubtaskState(message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState());
break;
case DISCARDED:
LOG.warn("Could not acknowledge the checkpoint {} for task {} of job {} at {}, " +
"because the pending checkpoint had been discarded. Discarding the " +
"state handle tp avoid lingering state.",
message.getCheckpointId(), message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
discardSubtaskState(message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState());
}
return true;
}
else if (checkpoint != null) {
throw new IllegalStateException(
"Received message for discarded but non-removed checkpoint " + checkpointId);
}
else {
boolean wasPendingCheckpoint;
if (recentPendingCheckpoints.contains(checkpointId)) {
wasPendingCheckpoint = true;
LOG.warn("Received late message for now expired checkpoint attempt {} from task " +
"{} of job {} at {}.", checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
}
else {
LOG.debug("Received message for an unknown checkpoint {} from task {} of job {} at {}.",
checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo);
wasPendingCheckpoint = false;
}
discardSubtaskState(message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState());
return wasPendingCheckpoint;
}
}
}
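// Illustrative usage (a hedged sketch; "coordinator" and the message origin are hypothetical):
//
//   AcknowledgeCheckpoint ack = ...;  // ack message received from a task executor
//   boolean known = coordinator.receiveAcknowledgeMessage(ack, "taskmanager-1:35107");
//   // known == true iff the message matched a pending (non-discarded) checkpoint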
/**
* Try to complete the given pending checkpoint.
*
* <p>Important: This method should only be called in the checkpoint lock scope.
*
* @param pendingCheckpoint to complete
* @throws CheckpointException if the completion failed
*/
private void completePendingCheckpoint(PendingCheckpoint pendingCheckpoint) throws CheckpointException {
final long checkpointId = pendingCheckpoint.getCheckpointId();
final CompletedCheckpoint completedCheckpoint;
Map<OperatorID, OperatorState> operatorStates = pendingCheckpoint.getOperatorStates();
sharedStateRegistry.registerAll(operatorStates.values());
try {
try {
completedCheckpoint = pendingCheckpoint.finalizeCheckpoint();
failureManager.handleCheckpointSuccess(pendingCheckpoint.getCheckpointId());
}
catch (Exception e1) {
if (!pendingCheckpoint.isDiscarded()) {
abortPendingCheckpoint(
pendingCheckpoint,
new CheckpointException(
CheckpointFailureReason.FINALIZE_CHECKPOINT_FAILURE, e1));
}
throw new CheckpointException("Could not finalize the pending checkpoint " + checkpointId + '.',
CheckpointFailureReason.FINALIZE_CHECKPOINT_FAILURE, e1);
}
Preconditions.checkState(pendingCheckpoint.isDiscarded() && completedCheckpoint != null);
try {
completedCheckpointStore.addCheckpoint(completedCheckpoint);
} catch (Exception exception) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
completedCheckpoint.discardOnFailedStoring();
} catch (Throwable t) {
LOG.warn("Could not properly discard completed checkpoint {}.", completedCheckpoint.getCheckpointID(), t);
}
}
});
throw new CheckpointException("Could not complete the pending checkpoint " + checkpointId + '.',
CheckpointFailureReason.FINALIZE_CHECKPOINT_FAILURE, exception);
}
} finally {
pendingCheckpoints.remove(checkpointId);
resumePeriodicTriggering();
}
rememberRecentCheckpointId(checkpointId);
dropSubsumedCheckpoints(checkpointId);
lastCheckpointCompletionRelativeTime = clock.relativeTimeMillis();
LOG.info("Completed checkpoint {} for job {} ({} bytes in {} ms).", checkpointId, job,
completedCheckpoint.getStateSize(), completedCheckpoint.getDuration());
if (LOG.isDebugEnabled()) {
StringBuilder builder = new StringBuilder();
builder.append("Checkpoint state: ");
for (OperatorState state : completedCheckpoint.getOperatorStates().values()) {
builder.append(state);
builder.append(", ");
}
builder.setLength(builder.length() - 2);
LOG.debug(builder.toString());
}
final long timestamp = completedCheckpoint.getTimestamp();
for (ExecutionVertex ev : tasksToCommitTo) {
Execution ee = ev.getCurrentExecutionAttempt();
if (ee != null) {
ee.notifyCheckpointComplete(checkpointId, timestamp);
}
}
}
/**
* Fails all pending checkpoints which have not been acknowledged by the given execution
* attempt id.
*
* @param executionAttemptId for which to discard unacknowledged pending checkpoints
* @param cause of the failure
*/
public void failUnacknowledgedPendingCheckpointsFor(ExecutionAttemptID executionAttemptId, Throwable cause) {
synchronized (lock) {
abortPendingCheckpoints(
checkpoint -> !checkpoint.isAcknowledgedBy(executionAttemptId),
new CheckpointException(CheckpointFailureReason.TASK_FAILURE, cause));
}
}
private void rememberRecentCheckpointId(long id) {
if (recentPendingCheckpoints.size() >= NUM_GHOST_CHECKPOINT_IDS) {
recentPendingCheckpoints.removeFirst();
}
recentPendingCheckpoints.addLast(id);
}
private void dropSubsumedCheckpoints(long checkpointId) {
abortPendingCheckpoints(
checkpoint -> checkpoint.getCheckpointId() < checkpointId && checkpoint.canBeSubsumed(),
new CheckpointException(CheckpointFailureReason.CHECKPOINT_SUBSUMED));
}
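// Worked example (illustrative ids only): if pending checkpoints 5, 6 and 7 exist and
// checkpoint 7 completes first, checkpoints 5 and 6 are aborted here with
// CHECKPOINT_SUBSUMED, provided canBeSubsumed() holds for them (e.g. they are not savepoints).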
/**
* Resumes suspended periodic triggering.
*
* <p>NOTE: The caller of this method must hold the lock when invoking the method!
*/
private void resumePeriodicTriggering() {
assert(Thread.holdsLock(lock));
if (shutdown || !periodicScheduling) {
return;
}
if (periodicTriggeringSuspended) {
periodicTriggeringSuspended = false;
if (currentPeriodicTrigger != null) {
currentPeriodicTrigger.cancel(false);
}
currentPeriodicTrigger = scheduleTriggerWithDelay(0L);
}
}
/**
* Restores the latest checkpointed state.
*
* @param tasks Map of job vertices to restore. State for these vertices is
* restored via {@link Execution}.
* @param errorIfNoCheckpoint Fail if no completed checkpoint is available to
* restore from.
* @param allowNonRestoredState Allow checkpoint state that cannot be mapped
* to any job vertex in tasks.
* @return <code>true</code> if state was restored, <code>false</code> otherwise.
* @throws IllegalStateException If the CheckpointCoordinator is shut down.
* @throws IllegalStateException If no completed checkpoint is available and
* the <code>errorIfNoCheckpoint</code> flag has been set.
* @throws IllegalStateException If the checkpoint contains state that cannot be
* mapped to any job vertex in <code>tasks</code> and the
* <code>allowNonRestoredState</code> flag has not been set.
* @throws IllegalStateException If the max parallelism changed for an operator
* that restores state from this checkpoint.
* @throws IllegalStateException If the parallelism changed for an operator
* that restores <i>non-partitioned</i> state from this
* checkpoint.
*/
@Deprecated
public boolean restoreLatestCheckpointedState(
Map<JobVertexID, ExecutionJobVertex> tasks,
boolean errorIfNoCheckpoint,
boolean allowNonRestoredState) throws Exception {
return restoreLatestCheckpointedState(new HashSet<>(tasks.values()), errorIfNoCheckpoint, allowNonRestoredState);
}
/**
* Restores the latest checkpointed state.
*
* @param tasks Set of job vertices to restore. State for these vertices is
* restored via {@link Execution}.
* @param errorIfNoCheckpoint Fail if no completed checkpoint is available to
* restore from.
* @param allowNonRestoredState Allow checkpoint state that cannot be mapped
* to any job vertex in tasks.
* @return <code>true</code> if state was restored, <code>false</code> otherwise.
* @throws IllegalStateException If the CheckpointCoordinator is shut down.
* @throws IllegalStateException If no completed checkpoint is available and
* the <code>errorIfNoCheckpoint</code> flag has been set.
* @throws IllegalStateException If the checkpoint contains state that cannot be
* mapped to any job vertex in <code>tasks</code> and the
* <code>allowNonRestoredState</code> flag has not been set.
* @throws IllegalStateException If the max parallelism changed for an operator
* that restores state from this checkpoint.
* @throws IllegalStateException If the parallelism changed for an operator
* that restores <i>non-partitioned</i> state from this
* checkpoint.
*/
public boolean restoreLatestCheckpointedState(
final Set<ExecutionJobVertex> tasks,
final boolean errorIfNoCheckpoint,
final boolean allowNonRestoredState) throws Exception {
synchronized (lock) {
if (shutdown) {
throw new IllegalStateException("CheckpointCoordinator is shut down");
}
sharedStateRegistry.close();
sharedStateRegistry = sharedStateRegistryFactory.create(executor);
completedCheckpointStore.recover();
for (CompletedCheckpoint completedCheckpoint : completedCheckpointStore.getAllCheckpoints()) {
completedCheckpoint.registerSharedStatesAfterRestored(sharedStateRegistry);
}
LOG.debug("Status of the shared state registry of job {} after restore: {}.", job, sharedStateRegistry);
CompletedCheckpoint latest = completedCheckpointStore.getLatestCheckpoint(isPreferCheckpointForRecovery);
if (latest == null) {
if (errorIfNoCheckpoint) {
throw new IllegalStateException("No completed checkpoint available");
} else {
LOG.debug("Resetting the master hooks.");
MasterHooks.reset(masterHooks.values(), LOG);
return false;
}
}
LOG.info("Restoring job {} from latest valid checkpoint: {}.", job, latest);
final Map<OperatorID, OperatorState> operatorStates = latest.getOperatorStates();
StateAssignmentOperation stateAssignmentOperation =
new StateAssignmentOperation(latest.getCheckpointID(), tasks, operatorStates, allowNonRestoredState);
stateAssignmentOperation.assignStates();
MasterHooks.restoreMasterHooks(
masterHooks,
latest.getMasterHookStates(),
latest.getCheckpointID(),
allowNonRestoredState,
LOG);
if (statsTracker != null) {
long restoreTimestamp = System.currentTimeMillis();
RestoredCheckpointStats restored = new RestoredCheckpointStats(
latest.getCheckpointID(),
latest.getProperties(),
restoreTimestamp,
latest.getExternalPointer());
statsTracker.reportRestoredCheckpoint(restored);
}
return true;
}
}
/**
* Restore the state with given savepoint.
*
* @param savepointPointer The pointer to the savepoint.
* @param allowNonRestored True if allowing checkpoint state that cannot be
* mapped to any job vertex in tasks.
* @param tasks Map of job vertices to restore. State for these
* vertices is restored via
* {@link Execution}.
* @param userClassLoader The class loader to resolve serialized classes in
* legacy savepoint versions.
*/
public boolean restoreSavepoint(
String savepointPointer,
boolean allowNonRestored,
Map<JobVertexID, ExecutionJobVertex> tasks,
ClassLoader userClassLoader) throws Exception {
Preconditions.checkNotNull(savepointPointer, "The savepoint path cannot be null.");
LOG.info("Starting job {} from savepoint {} ({})",
job, savepointPointer, (allowNonRestored ? "allowing non restored state" : ""));
final CompletedCheckpointStorageLocation checkpointLocation = checkpointStorage.resolveCheckpoint(savepointPointer);
CompletedCheckpoint savepoint = Checkpoints.loadAndValidateCheckpoint(
job, tasks, checkpointLocation, userClassLoader, allowNonRestored);
completedCheckpointStore.addCheckpoint(savepoint);
long nextCheckpointId = savepoint.getCheckpointID() + 1;
checkpointIdCounter.setCount(nextCheckpointId);
LOG.info("Reset the checkpoint ID of job {} to {}.", job, nextCheckpointId);
return restoreLatestCheckpointedState(new HashSet<>(tasks.values()), true, allowNonRestored);
}
public int getNumberOfPendingCheckpoints() {
return this.pendingCheckpoints.size();
}
public int getNumberOfRetainedSuccessfulCheckpoints() {
synchronized (lock) {
return completedCheckpointStore.getNumberOfRetainedCheckpoints();
}
}
public Map<Long, PendingCheckpoint> getPendingCheckpoints() {
synchronized (lock) {
return new HashMap<>(this.pendingCheckpoints);
}
}
public List<CompletedCheckpoint> getSuccessfulCheckpoints() throws Exception {
synchronized (lock) {
return completedCheckpointStore.getAllCheckpoints();
}
}
public CheckpointStorageCoordinatorView getCheckpointStorage() {
return checkpointStorage;
}
public CompletedCheckpointStore getCheckpointStore() {
return completedCheckpointStore;
}
public long getCheckpointTimeout() {
return checkpointTimeout;
}
public ArrayDeque<CheckpointTriggerRequest> getTriggerRequestQueue() {
return triggerRequestQueue;
}
public boolean isTriggering() {
return isTriggering;
}
@VisibleForTesting
boolean isCurrentPeriodicTriggerAvailable() {
return currentPeriodicTrigger != null;
}
/**
* Returns whether periodic checkpointing has been configured.
*
* @return <code>true</code> if periodic checkpoints have been configured.
*/
public boolean isPeriodicCheckpointingConfigured() {
return baseInterval != Long.MAX_VALUE;
}
public void startCheckpointScheduler() {
synchronized (lock) {
if (shutdown) {
throw new IllegalArgumentException("Checkpoint coordinator is shut down");
}
stopCheckpointScheduler();
periodicScheduling = true;
currentPeriodicTrigger = scheduleTriggerWithDelay(getRandomInitDelay());
}
}
public void stopCheckpointScheduler() {
synchronized (lock) {
periodicTriggeringSuspended = false;
periodicScheduling = false;
if (currentPeriodicTrigger != null) {
currentPeriodicTrigger.cancel(false);
currentPeriodicTrigger = null;
}
final CheckpointException reason =
new CheckpointException(CheckpointFailureReason.CHECKPOINT_COORDINATOR_SUSPEND);
abortPendingAndQueuedCheckpoints(reason);
numUnsuccessfulCheckpointsTriggers.set(0);
}
}
/**
* Aborts all the pending checkpoints due to an exception.
* @param exception The exception.
*/
public void abortPendingCheckpoints(CheckpointException exception) {
synchronized (lock) {
abortPendingCheckpoints(ignored -> true, exception);
}
}
private void abortPendingCheckpoints(
Predicate<PendingCheckpoint> checkpointToFailPredicate,
CheckpointException exception) {
assert Thread.holdsLock(lock);
final PendingCheckpoint[] pendingCheckpointsToFail = pendingCheckpoints
.values()
.stream()
.filter(checkpointToFailPredicate)
.toArray(PendingCheckpoint[]::new);
for (PendingCheckpoint pendingCheckpoint : pendingCheckpointsToFail) {
abortPendingCheckpoint(pendingCheckpoint, exception);
}
}
/**
* If too many checkpoints are currently in progress, suspends periodic triggering and rejects this attempt so that the request is effectively queued.
*
* @throws CheckpointException If too many checkpoints are currently in progress.
*/
private void checkConcurrentCheckpoints() throws CheckpointException {
if (pendingCheckpoints.size() >= maxConcurrentCheckpointAttempts) {
periodicTriggeringSuspended = true;
if (currentPeriodicTrigger != null) {
currentPeriodicTrigger.cancel(false);
currentPeriodicTrigger = null;
}
throw new CheckpointException(CheckpointFailureReason.TOO_MANY_CONCURRENT_CHECKPOINTS);
}
}
/**
* Make sure the minimum interval between checkpoints has passed.
*
* @throws CheckpointException If the minimum interval between checkpoints has not passed.
*/
private void checkMinPauseBetweenCheckpoints() throws CheckpointException {
final long nextCheckpointTriggerRelativeTime =
lastCheckpointCompletionRelativeTime + minPauseBetweenCheckpoints;
final long durationTillNextMillis =
nextCheckpointTriggerRelativeTime - clock.relativeTimeMillis();
if (durationTillNextMillis > 0) {
if (currentPeriodicTrigger != null) {
currentPeriodicTrigger.cancel(false);
currentPeriodicTrigger = null;
}
currentPeriodicTrigger = scheduleTriggerWithDelay(durationTillNextMillis);
throw new CheckpointException(CheckpointFailureReason.MINIMUM_TIME_BETWEEN_CHECKPOINTS);
}
}
private long getRandomInitDelay() {
return ThreadLocalRandom.current().nextLong(minPauseBetweenCheckpoints, baseInterval + 1L);
}
private ScheduledFuture<?> scheduleTriggerWithDelay(long initDelay) {
return timer.scheduleAtFixedRate(
new ScheduledTrigger(),
initDelay, baseInterval, TimeUnit.MILLISECONDS);
}
public JobStatusListener createActivatorDeactivator() {
synchronized (lock) {
if (shutdown) {
throw new IllegalArgumentException("Checkpoint coordinator is shut down");
}
if (jobStatusListener == null) {
jobStatusListener = new CheckpointCoordinatorDeActivator(this);
}
return jobStatusListener;
}
}
private final class ScheduledTrigger implements Runnable {
@Override
public void run() {
try {
triggerCheckpoint(System.currentTimeMillis(), true);
}
catch (Exception e) {
LOG.error("Exception while triggering checkpoint for job {}.", job, e);
}
}
}
/**
* Discards the given state object asynchronously belonging to the given job, execution attempt
* id and checkpoint id.
*
* @param jobId identifying the job to which the state object belongs
* @param executionAttemptID identifying the task to which the state object belongs
* @param checkpointId of the state object
* @param subtaskState to discard asynchronously
*/
private void discardSubtaskState(
final JobID jobId,
final ExecutionAttemptID executionAttemptID,
final long checkpointId,
final TaskStateSnapshot subtaskState) {
if (subtaskState != null) {
executor.execute(new Runnable() {
@Override
public void run() {
try {
subtaskState.discardState();
} catch (Throwable t2) {
LOG.warn("Could not properly discard state object of checkpoint {} " +
"belonging to task {} of job {}.", checkpointId, executionAttemptID, jobId, t2);
}
}
});
}
}
private void abortPendingCheckpoint(
PendingCheckpoint pendingCheckpoint,
CheckpointException exception) {
abortPendingCheckpoint(pendingCheckpoint, exception, null);
}
private void abortPendingCheckpoint(
PendingCheckpoint pendingCheckpoint,
CheckpointException exception,
@Nullable final ExecutionAttemptID executionAttemptID) {
assert(Thread.holdsLock(lock));
if (!pendingCheckpoint.isDiscarded()) {
try {
pendingCheckpoint.abort(
exception.getCheckpointFailureReason(), exception.getCause());
if (pendingCheckpoint.getProps().isSavepoint() &&
pendingCheckpoint.getProps().isSynchronous()) {
failureManager.handleSynchronousSavepointFailure(exception);
} else if (executionAttemptID != null) {
failureManager.handleTaskLevelCheckpointException(
exception, pendingCheckpoint.getCheckpointId(), executionAttemptID);
} else {
failureManager.handleJobLevelCheckpointException(
exception, pendingCheckpoint.getCheckpointId());
}
} finally {
pendingCheckpoints.remove(pendingCheckpoint.getCheckpointId());
rememberRecentCheckpointId(pendingCheckpoint.getCheckpointId());
resumePeriodicTriggering();
}
}
}
private void preCheckBeforeTriggeringCheckpoint(boolean isPeriodic, boolean forceCheckpoint) throws CheckpointException {
preCheckGlobalState(isPeriodic);
if (!forceCheckpoint) {
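            // Forced checkpoints bypass the concurrency and min-pause limits;
            // only the global shutdown/periodic-scheduling state is checked above.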
checkConcurrentCheckpoints();
checkMinPauseBetweenCheckpoints();
}
}
private void preCheckGlobalState(boolean isPeriodic) throws CheckpointException {
if (shutdown) {
throw new CheckpointException(CheckpointFailureReason.CHECKPOINT_COORDINATOR_SHUTDOWN);
}
if (isPeriodic && !periodicScheduling) {
throw new CheckpointException(CheckpointFailureReason.PERIODIC_SCHEDULER_SHUTDOWN);
}
}
/**
* Check if all tasks that we need to trigger are running. If not, abort the checkpoint.
*
     * @return the executions that need to be triggered.
     * @throws CheckpointException if any task to trigger is not currently running
*/
private Execution[] getTriggerExecutions() throws CheckpointException {
Execution[] executions = new Execution[tasksToTrigger.length];
for (int i = 0; i < tasksToTrigger.length; i++) {
Execution ee = tasksToTrigger[i].getCurrentExecutionAttempt();
if (ee == null) {
LOG.info(
"Checkpoint triggering task {} of job {} is not being executed at the moment. Aborting checkpoint.",
tasksToTrigger[i].getTaskNameWithSubtaskIndex(),
job);
throw new CheckpointException(
CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
} else if (ee.getState() == ExecutionState.RUNNING) {
executions[i] = ee;
} else {
LOG.info(
"Checkpoint triggering task {} of job {} is not in state {} but {} instead. Aborting checkpoint.",
tasksToTrigger[i].getTaskNameWithSubtaskIndex(),
job,
ExecutionState.RUNNING,
ee.getState());
throw new CheckpointException(
CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
}
}
return executions;
}
/**
* Check if all tasks that need to acknowledge the checkpoint are running.
     * If not, abort the checkpoint.
     *
     * @return the execution vertices which should give an ack response
     * @throws CheckpointException if any task to acknowledge is not currently running
*/
private Map<ExecutionAttemptID, ExecutionVertex> getAckTasks() throws CheckpointException {
Map<ExecutionAttemptID, ExecutionVertex> ackTasks = new HashMap<>(tasksToWaitFor.length);
for (ExecutionVertex ev : tasksToWaitFor) {
Execution ee = ev.getCurrentExecutionAttempt();
if (ee != null) {
ackTasks.put(ee.getAttemptId(), ev);
} else {
LOG.info(
"Checkpoint acknowledging task {} of job {} is not being executed at the moment. Aborting checkpoint.",
ev.getTaskNameWithSubtaskIndex(),
job);
throw new CheckpointException(
CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
}
}
return ackTasks;
}
private void abortPendingAndQueuedCheckpoints(CheckpointException exception) {
assert(Thread.holdsLock(lock));
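        // Fail all queued trigger requests first, then abort the checkpoints already in flight.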
CheckpointTriggerRequest request;
while ((request = triggerRequestQueue.poll()) != null) {
request.onCompletionPromise.completeExceptionally(exception);
}
abortPendingCheckpoints(exception);
}
    /**
     * Canceller for a pending checkpoint. The checkpoint is aborted as expired
     * if it does not complete within the configured timeout.
     */
private class CheckpointCanceller implements Runnable {
private final PendingCheckpoint pendingCheckpoint;
private CheckpointCanceller(PendingCheckpoint pendingCheckpoint) {
this.pendingCheckpoint = checkNotNull(pendingCheckpoint);
}
@Override
public void run() {
synchronized (lock) {
if (!pendingCheckpoint.isDiscarded()) {
LOG.info("Checkpoint {} of job {} expired before completing.",
pendingCheckpoint.getCheckpointId(), job);
abortPendingCheckpoint(
pendingCheckpoint,
new CheckpointException(CheckpointFailureReason.CHECKPOINT_EXPIRED));
}
}
}
}
private static CheckpointException getCheckpointException(
CheckpointFailureReason defaultReason, Throwable throwable) {
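        // Prefer a CheckpointException buried in the cause chain; otherwise wrap
        // the throwable with the supplied default reason.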
final Optional<CheckpointException> checkpointExceptionOptional =
ExceptionUtils.findThrowable(throwable, CheckpointException.class);
return checkpointExceptionOptional
.orElseGet(() -> new CheckpointException(defaultReason, throwable));
}
private static class CheckpointIdAndStorageLocation {
private final long checkpointId;
private final CheckpointStorageLocation checkpointStorageLocation;
CheckpointIdAndStorageLocation(
long checkpointId,
CheckpointStorageLocation checkpointStorageLocation) {
this.checkpointId = checkpointId;
this.checkpointStorageLocation = checkNotNull(checkpointStorageLocation);
}
}
private static class CheckpointTriggerRequest {
private final long timestamp;
private final CheckpointProperties props;
private final @Nullable String externalSavepointLocation;
private final boolean isPeriodic;
private final boolean advanceToEndOfTime;
private final CompletableFuture<CompletedCheckpoint> onCompletionPromise;
CheckpointTriggerRequest(
long timestamp,
CheckpointProperties props,
@Nullable String externalSavepointLocation,
boolean isPeriodic,
boolean advanceToEndOfTime,
CompletableFuture<CompletedCheckpoint> onCompletionPromise) {
this.timestamp = timestamp;
this.props = checkNotNull(props);
this.externalSavepointLocation = externalSavepointLocation;
this.isPeriodic = isPeriodic;
this.advanceToEndOfTime = advanceToEndOfTime;
this.onCompletionPromise = checkNotNull(onCompletionPromise);
}
}
}
|
Also, I would prefer taking a snapshot of `out.writerIndex()` and using it in both the checkState condition and the output message, to address any thread-safety concerns.
|
void encode(final ByteBuf out) {
final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
final int start = out.writerIndex();
out.writeIntLE(expectedLength);
this.frame.encode(out);
this.headers.encode(out);
checkState(out.writerIndex() - start == expectedLength,
"encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
expectedLength,
out.writerIndex() - expectedLength);
if (this.payload.length > 0) {
out.writeIntLE(this.payload.length);
out.writeBytes(this.payload);
}
}
|
out.writerIndex() - expectedLength);
|
void encode(final ByteBuf out) {
final int expectedLength = RntbdRequestFrame.LENGTH + this.headers.computeLength();
final int start = out.writerIndex();
out.writeIntLE(expectedLength);
this.frame.encode(out);
this.headers.encode(out);
final int observedLength = out.writerIndex() - start;
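        // Snapshot writerIndex() once so the check and the error message use the
        // same value even if the buffer is touched concurrently.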
checkState(observedLength == expectedLength,
"encoding error: {\"expectedLength\": %s, \"observedLength\": %s}",
expectedLength,
observedLength);
if (this.payload.length > 0) {
out.writeIntLE(this.payload.length);
out.writeBytes(this.payload);
}
}
|
class RntbdRequest {
private static final byte[] EMPTY_BYTE_ARRAY = {};
private final RntbdRequestFrame frame;
private final RntbdRequestHeaders headers;
private final byte[] payload;
private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) {
checkNotNull(frame, "frame");
checkNotNull(headers, "headers");
this.frame = frame;
this.headers = headers;
this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload;
}
public UUID getActivityId() {
return this.frame.getActivityId();
}
@JsonIgnore
@SuppressWarnings("unchecked")
public <T> T getHeader(final RntbdRequestHeader header) {
return (T) this.headers.get(header).getValue();
}
public Long getTransportRequestId() {
return this.getHeader(RntbdRequestHeader.TransportRequestID);
}
public static RntbdRequest decode(final ByteBuf in) {
final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES);
if (resourceOperationCode == 0) {
final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode);
throw new IllegalStateException(reason);
}
final int start = in.readerIndex();
final int expectedLength = in.readIntLE();
final RntbdRequestFrame header = RntbdRequestFrame.decode(in);
final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in);
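        // Whatever remains of the declared frame length after the frame header
        // and request headers is the payload.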
final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start));
final int observedLength = in.readerIndex() - start;
if (observedLength != expectedLength) {
final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength);
throw new IllegalStateException(reason);
}
final byte[] payload = new byte[payloadBuf.readableBytes()];
payloadBuf.readBytes(payload);
in.discardReadBytes();
return new RntbdRequest(header, metadata, payload);
}
public static RntbdRequest from(final RntbdRequestArgs args) {
final RxDocumentServiceRequest serviceRequest = args.serviceRequest();
final RntbdRequestFrame frame = new RntbdRequestFrame(
args.activityId(),
serviceRequest.getOperationType(),
serviceRequest.getResourceType());
final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame);
return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray());
}
}
|
class RntbdRequest {
private static final byte[] EMPTY_BYTE_ARRAY = {};
private final RntbdRequestFrame frame;
private final RntbdRequestHeaders headers;
private final byte[] payload;
private RntbdRequest(final RntbdRequestFrame frame, final RntbdRequestHeaders headers, final byte[] payload) {
checkNotNull(frame, "frame");
checkNotNull(headers, "headers");
this.frame = frame;
this.headers = headers;
this.payload = payload == null ? EMPTY_BYTE_ARRAY : payload;
}
public UUID getActivityId() {
return this.frame.getActivityId();
}
@JsonIgnore
@SuppressWarnings("unchecked")
public <T> T getHeader(final RntbdRequestHeader header) {
return (T) this.headers.get(header).getValue();
}
public Long getTransportRequestId() {
return this.getHeader(RntbdRequestHeader.TransportRequestID);
}
public static RntbdRequest decode(final ByteBuf in) {
final int resourceOperationCode = in.getInt(in.readerIndex() + Integer.BYTES);
if (resourceOperationCode == 0) {
final String reason = String.format("resourceOperationCode=0x%08X", resourceOperationCode);
throw new IllegalStateException(reason);
}
final int start = in.readerIndex();
final int expectedLength = in.readIntLE();
final RntbdRequestFrame header = RntbdRequestFrame.decode(in);
final RntbdRequestHeaders metadata = RntbdRequestHeaders.decode(in);
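        // Whatever remains of the declared frame length after the frame header
        // and request headers is the payload.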
final ByteBuf payloadBuf = in.readSlice(expectedLength - (in.readerIndex() - start));
final int observedLength = in.readerIndex() - start;
if (observedLength != expectedLength) {
final String reason = Strings.lenientFormat("expectedLength=%s, observedLength=%s", expectedLength, observedLength);
throw new IllegalStateException(reason);
}
final byte[] payload = new byte[payloadBuf.readableBytes()];
payloadBuf.readBytes(payload);
in.discardReadBytes();
return new RntbdRequest(header, metadata, payload);
}
public static RntbdRequest from(final RntbdRequestArgs args) {
final RxDocumentServiceRequest serviceRequest = args.serviceRequest();
final RntbdRequestFrame frame = new RntbdRequestFrame(
args.activityId(),
serviceRequest.getOperationType(),
serviceRequest.getResourceType());
final RntbdRequestHeaders headers = new RntbdRequestHeaders(args, frame);
return new RntbdRequest(frame, headers, serviceRequest.getContentAsByteArray());
}
}
|
Got it. We can always replace deliveryPolicy with `standardRulesEngineRuleMap`.
|
public Mono<CdnEndpoint> updateResourceAsync() {
final CdnEndpointImpl self = this;
EndpointUpdateParameters endpointUpdateParameters = new EndpointUpdateParameters();
endpointUpdateParameters.withIsHttpAllowed(this.innerModel().isHttpAllowed())
.withIsHttpsAllowed(this.innerModel().isHttpsAllowed())
.withOriginPath(this.innerModel().originPath())
.withOriginHostHeader(this.innerModel().originHostHeader())
.withIsCompressionEnabled(this.innerModel().isCompressionEnabled())
.withContentTypesToCompress(this.innerModel().contentTypesToCompress())
.withGeoFilters(this.innerModel().geoFilters())
.withOptimizationType(this.innerModel().optimizationType())
.withQueryStringCachingBehavior(this.innerModel().queryStringCachingBehavior())
.withTags(this.innerModel().tags());
if (isStandardMicrosoftSku()) {
List<DeliveryRule> rules = this.standardRulesEngineRuleMap.values()
.stream()
.sorted(Comparator.comparingInt(DeliveryRule::order))
.collect(Collectors.toList());
if (innerModel().deliveryPolicy() == null && !CoreUtils.isNullOrEmpty(this.standardRulesEngineRuleMap)) {
endpointUpdateParameters.withDeliveryPolicy(
new EndpointPropertiesUpdateParametersDeliveryPolicy()
.withRules(rules));
} else if (innerModel().deliveryPolicy() != null) {
endpointUpdateParameters.withDeliveryPolicy(
innerModel().deliveryPolicy()
.withRules(rules));
}
}
DeepCreatedOrigin originInner = this.innerModel().origins().get(0);
OriginUpdateParameters originUpdateParameters = new OriginUpdateParameters()
.withHostname(originInner.hostname())
.withHttpPort(originInner.httpPort())
.withHttpsPort(originInner.httpsPort());
Mono<EndpointInner> originUpdateTask = this.parent().manager().serviceClient().getOrigins().updateAsync(
this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
originInner.name(),
originUpdateParameters)
.then(Mono.empty());
Mono<EndpointInner> endpointUpdateTask = this.parent().manager().serviceClient().getEndpoints().updateAsync(
this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
endpointUpdateParameters);
Flux<CustomDomainInner> customDomainCreateTask = Flux.fromIterable(this.customDomainList)
.flatMapDelayError(itemToCreate -> this.parent().manager().serviceClient().getCustomDomains().createAsync(
this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
self.parent().manager().resourceManager().internalContext()
.randomResourceName("CustomDomain", 50),
new CustomDomainParameters().withHostname(itemToCreate.hostname())
), 32, 32);
Flux<CustomDomainInner> customDomainDeleteTask = Flux.fromIterable(this.deletedCustomDomainList)
.flatMapDelayError(itemToDelete -> this.parent().manager().serviceClient().getCustomDomains().deleteAsync(
this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
itemToDelete.name()
), 32, 32);
Mono<EndpointInner> customDomainTask = Flux.concat(customDomainCreateTask, customDomainDeleteTask)
.then(Mono.empty());
return Flux.mergeDelayError(32, customDomainTask, originUpdateTask, endpointUpdateTask)
.last()
.map(inner -> {
self.setInner(inner);
self.customDomainList.clear();
self.deletedCustomDomainList.clear();
return self;
});
}
|
}
|
public Mono<CdnEndpoint> updateResourceAsync() {
final CdnEndpointImpl self = this;
EndpointUpdateParameters endpointUpdateParameters = new EndpointUpdateParameters();
endpointUpdateParameters.withIsHttpAllowed(this.innerModel().isHttpAllowed())
.withIsHttpsAllowed(this.innerModel().isHttpsAllowed())
.withOriginPath(this.innerModel().originPath())
.withOriginHostHeader(this.innerModel().originHostHeader())
.withIsCompressionEnabled(this.innerModel().isCompressionEnabled())
.withContentTypesToCompress(this.innerModel().contentTypesToCompress())
.withGeoFilters(this.innerModel().geoFilters())
.withOptimizationType(this.innerModel().optimizationType())
.withQueryStringCachingBehavior(this.innerModel().queryStringCachingBehavior())
.withTags(this.innerModel().tags());
if (isStandardMicrosoftSku()) {
List<DeliveryRule> rules = this.standardRulesEngineRuleMap.values()
.stream()
.sorted(Comparator.comparingInt(DeliveryRule::order))
.collect(Collectors.toList());
ensureDeliveryPolicy();
endpointUpdateParameters.withDeliveryPolicy(
new EndpointPropertiesUpdateParametersDeliveryPolicy()
.withRules(rules));
}
DeepCreatedOrigin originInner = this.innerModel().origins().get(0);
OriginUpdateParameters originUpdateParameters = new OriginUpdateParameters()
.withHostname(originInner.hostname())
.withHttpPort(originInner.httpPort())
.withHttpsPort(originInner.httpsPort());
Mono<EndpointInner> originUpdateTask = this.parent().manager().serviceClient().getOrigins().updateAsync(
this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
originInner.name(),
originUpdateParameters)
.then(Mono.empty());
Mono<EndpointInner> endpointUpdateTask = this.parent().manager().serviceClient().getEndpoints().updateAsync(
this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
endpointUpdateParameters);
Flux<CustomDomainInner> customDomainCreateTask = Flux.fromIterable(this.customDomainList)
.flatMapDelayError(itemToCreate -> this.parent().manager().serviceClient().getCustomDomains().createAsync(
this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
self.parent().manager().resourceManager().internalContext()
.randomResourceName("CustomDomain", 50),
new CustomDomainParameters().withHostname(itemToCreate.hostname())
), 32, 32);
Flux<CustomDomainInner> customDomainDeleteTask = Flux.fromIterable(this.deletedCustomDomainList)
.flatMapDelayError(itemToDelete -> this.parent().manager().serviceClient().getCustomDomains().deleteAsync(
this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
itemToDelete.name()
), 32, 32);
Mono<EndpointInner> customDomainTask = Flux.concat(customDomainCreateTask, customDomainDeleteTask)
.then(Mono.empty());
return Flux.mergeDelayError(32, customDomainTask, originUpdateTask, endpointUpdateTask)
.last()
.map(inner -> {
self.setInner(inner);
self.customDomainList.clear();
self.deletedCustomDomainList.clear();
return self;
});
}
|
class CdnEndpointImpl
extends ExternalChildResourceImpl<
CdnEndpoint,
EndpointInner,
CdnProfileImpl,
CdnProfile>
implements CdnEndpoint,
CdnEndpoint.DefinitionStages.Blank.StandardEndpoint<CdnProfile.DefinitionStages.WithStandardCreate>,
CdnEndpoint.DefinitionStages.Blank.PremiumEndpoint<CdnProfile.DefinitionStages.WithPremiumVerizonCreate>,
CdnEndpoint.DefinitionStages.WithStandardAttach<CdnProfile.DefinitionStages.WithStandardCreate>,
CdnEndpoint.DefinitionStages.WithPremiumAttach<CdnProfile.DefinitionStages.WithPremiumVerizonCreate>,
CdnEndpoint.UpdateDefinitionStages.Blank.StandardEndpoint<CdnProfile.Update>,
CdnEndpoint.UpdateDefinitionStages.Blank.PremiumEndpoint<CdnProfile.Update>,
CdnEndpoint.UpdateDefinitionStages.WithStandardAttach<CdnProfile.Update>,
CdnEndpoint.UpdateDefinitionStages.WithPremiumAttach<CdnProfile.Update>,
CdnEndpoint.UpdateStandardEndpoint,
CdnEndpoint.UpdatePremiumEndpoint {
private List<CustomDomainInner> customDomainList;
private List<CustomDomainInner> deletedCustomDomainList;
private final Map<String, DeliveryRule> standardRulesEngineRuleMap = new HashMap<>();
CdnEndpointImpl(String name, CdnProfileImpl parent, EndpointInner inner) {
super(name, parent, inner);
this.customDomainList = new ArrayList<>();
this.deletedCustomDomainList = new ArrayList<>();
initializeRuleMapForStandardMicrosoftSku();
}
@Override
public String id() {
return this.innerModel().id();
}
@Override
public Mono<CdnEndpoint> createResourceAsync() {
final CdnEndpointImpl self = this;
if (isStandardMicrosoftSku()
&& this.innerModel().deliveryPolicy() == null
&& this.standardRulesEngineRuleMap.size() > 0) {
this.innerModel().withDeliveryPolicy(new EndpointPropertiesUpdateParametersDeliveryPolicy()
.withRules(this.standardRulesEngineRuleMap.values()
.stream()
.sorted(Comparator.comparingInt(DeliveryRule::order))
.collect(Collectors.toList())));
}
return this.parent().manager().serviceClient().getEndpoints().createAsync(this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
this.innerModel())
.flatMap(inner -> {
self.setInner(inner);
return Flux.fromIterable(self.customDomainList)
.flatMapDelayError(customDomainInner -> self.parent().manager().serviceClient()
.getCustomDomains().createAsync(
self.parent().resourceGroupName(),
self.parent().name(),
self.name(),
self.parent().manager().resourceManager().internalContext()
.randomResourceName("CustomDomain", 50),
new CustomDomainParameters().withHostname(customDomainInner.hostname())), 32, 32)
.then(self.parent().manager().serviceClient()
.getCustomDomains().listByEndpointAsync(
self.parent().resourceGroupName(),
self.parent().name(),
self.name())
.collectList()
.map(customDomainInners -> {
self.customDomainList.addAll(customDomainInners);
return self;
}));
});
}
    @Override
public Mono<Void> deleteResourceAsync() {
return this.parent().manager().serviceClient().getEndpoints().deleteAsync(this.parent().resourceGroupName(),
this.parent().name(),
this.name());
}
@Override
public Mono<CdnEndpoint> refreshAsync() {
final CdnEndpointImpl self = this;
return super.refreshAsync()
.flatMap(cdnEndpoint -> {
self.customDomainList.clear();
self.deletedCustomDomainList.clear();
initializeRuleMapForStandardMicrosoftSku();
return self.parent().manager().serviceClient().getCustomDomains().listByEndpointAsync(
self.parent().resourceGroupName(),
self.parent().name(),
self.name()
)
.collectList()
.map(customDomainInners -> {
self.customDomainList.addAll(customDomainInners);
return self;
});
});
}
@Override
protected Mono<EndpointInner> getInnerAsync() {
return this.parent().manager().serviceClient().getEndpoints().getAsync(this.parent().resourceGroupName(),
this.parent().name(),
this.name());
}
@Override
public PagedIterable<ResourceUsage> listResourceUsage() {
return PagedConverter.mapPage(this.parent().manager().serviceClient().getEndpoints().listResourceUsage(
this.parent().resourceGroupName(),
this.parent().name(),
this.name()),
ResourceUsage::new);
}
@Override
public Map<String, DeliveryRule> standardRulesEngineRules() {
return Collections.unmodifiableMap(this.standardRulesEngineRuleMap);
}
@Override
public CdnProfileImpl attach() {
return this.parent();
}
@Override
public String originHostHeader() {
return this.innerModel().originHostHeader();
}
@Override
public String originPath() {
return this.innerModel().originPath();
}
@Override
public Set<String> contentTypesToCompress() {
List<String> contentTypes = this.innerModel().contentTypesToCompress();
Set<String> set = new HashSet<>();
if (contentTypes != null) {
set.addAll(contentTypes);
}
return Collections.unmodifiableSet(set);
}
@Override
public boolean isCompressionEnabled() {
return this.innerModel().isCompressionEnabled();
}
@Override
public boolean isHttpAllowed() {
return this.innerModel().isHttpAllowed();
}
@Override
public boolean isHttpsAllowed() {
return this.innerModel().isHttpsAllowed();
}
@Override
public QueryStringCachingBehavior queryStringCachingBehavior() {
return this.innerModel().queryStringCachingBehavior();
}
@Override
public String optimizationType() {
if (this.innerModel().optimizationType() == null) {
return null;
}
return this.innerModel().optimizationType().toString();
}
@Override
public List<GeoFilter> geoFilters() {
return this.innerModel().geoFilters();
}
@Override
public String hostname() {
return this.innerModel().hostname();
}
@Override
public EndpointResourceState resourceState() {
return this.innerModel().resourceState();
}
@Override
public String provisioningState() {
return this.innerModel().provisioningState() == null ? null : this.innerModel().provisioningState().toString();
}
@Override
public String originHostName() {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
return this.innerModel().origins().get(0).hostname();
}
return null;
}
@Override
public int httpPort() {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
Integer httpPort = this.innerModel().origins().get(0).httpPort();
return (httpPort != null) ? httpPort : 0;
}
return 0;
}
@Override
public int httpsPort() {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
Integer httpsPort = this.innerModel().origins().get(0).httpsPort();
return (httpsPort != null) ? httpsPort : 0;
}
return 0;
}
@Override
public Set<String> customDomains() {
Set<String> set = new HashSet<>();
for (CustomDomainInner customDomainInner : this.parent().manager().serviceClient().getCustomDomains()
.listByEndpoint(this.parent().resourceGroupName(), this.parent().name(), this.name())) {
set.add(customDomainInner.hostname());
}
return Collections.unmodifiableSet(set);
}
@Override
public void start() {
this.parent().startEndpoint(this.name());
}
@Override
public Mono<Void> startAsync() {
return this.parent().startEndpointAsync(this.name());
}
@Override
public void stop() {
this.stopAsync().block();
}
@Override
public Mono<Void> stopAsync() {
return this.parent().stopEndpointAsync(this.name());
}
@Override
public void purgeContent(Set<String> contentPaths) {
if (contentPaths != null) {
this.purgeContentAsync(contentPaths).block();
}
}
@Override
public Mono<Void> purgeContentAsync(Set<String> contentPaths) {
return this.parent().purgeEndpointContentAsync(this.name(), contentPaths);
}
@Override
public void loadContent(Set<String> contentPaths) {
this.loadContentAsync(contentPaths).block();
}
@Override
public Mono<Void> loadContentAsync(Set<String> contentPaths) {
return this.parent().loadEndpointContentAsync(this.name(), contentPaths);
}
@Override
public CustomDomainValidationResult validateCustomDomain(String hostName) {
return this.validateCustomDomainAsync(hostName).block();
}
@Override
public Mono<CustomDomainValidationResult> validateCustomDomainAsync(String hostName) {
return this.parent().validateEndpointCustomDomainAsync(this.name(), hostName);
}
@Override
public CdnEndpointImpl withOrigin(String originName, String hostname) {
this.innerModel().origins().add(
new DeepCreatedOrigin()
.withName(originName)
.withHostname(hostname));
return this;
}
@Override
public CdnEndpointImpl withOrigin(String hostname) {
return this.withOrigin("origin", hostname);
}
@Override
public CdnEndpointImpl withPremiumOrigin(String originName, String hostname) {
return this.withOrigin(originName, hostname);
}
@Override
public CdnEndpointImpl withPremiumOrigin(String hostname) {
return this.withOrigin(hostname);
}
@Override
public CdnEndpointImpl withOriginPath(String originPath) {
this.innerModel().withOriginPath(originPath);
return this;
}
@Override
public CdnEndpointImpl withHttpAllowed(boolean httpAllowed) {
this.innerModel().withIsHttpAllowed(httpAllowed);
return this;
}
@Override
public CdnEndpointImpl withHttpsAllowed(boolean httpsAllowed) {
this.innerModel().withIsHttpsAllowed(httpsAllowed);
return this;
}
@Override
public CdnEndpointImpl withHttpPort(int httpPort) {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
this.innerModel().origins().get(0).withHttpPort(httpPort);
}
return this;
}
@Override
public CdnEndpointImpl withHttpsPort(int httpsPort) {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
this.innerModel().origins().get(0).withHttpsPort(httpsPort);
}
return this;
}
@Override
public CdnEndpointImpl withHostHeader(String hostHeader) {
this.innerModel().withOriginHostHeader(hostHeader);
return this;
}
@Override
public CdnEndpointImpl withContentTypesToCompress(Set<String> contentTypesToCompress) {
List<String> list = null;
if (contentTypesToCompress != null) {
list = new ArrayList<>(contentTypesToCompress);
}
this.innerModel().withContentTypesToCompress(list);
return this;
}
@Override
public CdnEndpointImpl withoutContentTypesToCompress() {
if (this.innerModel().contentTypesToCompress() != null) {
this.innerModel().contentTypesToCompress().clear();
}
return this;
}
@Override
public CdnEndpointImpl withContentTypeToCompress(String contentTypeToCompress) {
if (this.innerModel().contentTypesToCompress() == null) {
this.innerModel().withContentTypesToCompress(new ArrayList<>());
}
this.innerModel().contentTypesToCompress().add(contentTypeToCompress);
return this;
}
@Override
public CdnEndpointImpl withoutContentTypeToCompress(String contentTypeToCompress) {
if (this.innerModel().contentTypesToCompress() != null) {
this.innerModel().contentTypesToCompress().remove(contentTypeToCompress);
}
return this;
}
@Override
public CdnEndpointImpl withCompressionEnabled(boolean compressionEnabled) {
this.innerModel().withIsCompressionEnabled(compressionEnabled);
return this;
}
@Override
public CdnEndpointImpl withQueryStringCachingBehavior(QueryStringCachingBehavior cachingBehavior) {
this.innerModel().withQueryStringCachingBehavior(cachingBehavior);
return this;
}
@Override
public CdnEndpointImpl withGeoFilters(Collection<GeoFilter> geoFilters) {
List<GeoFilter> list = null;
if (geoFilters != null) {
list = new ArrayList<>(geoFilters);
}
this.innerModel().withGeoFilters(list);
return this;
}
@Override
public CdnEndpointImpl withoutGeoFilters() {
if (this.innerModel().geoFilters() != null) {
this.innerModel().geoFilters().clear();
}
return this;
}
@Override
public CdnEndpointImpl withGeoFilter(String relativePath, GeoFilterActions action, CountryIsoCode countryCode) {
GeoFilter geoFilter = this.createGeoFiltersObject(relativePath, action);
if (geoFilter.countryCodes() == null) {
geoFilter.withCountryCodes(new ArrayList<>());
}
geoFilter.countryCodes().add(countryCode.toString());
this.innerModel().geoFilters().add(geoFilter);
return this;
}
@Override
public CdnEndpointImpl withGeoFilter(
String relativePath, GeoFilterActions action, Collection<CountryIsoCode> countryCodes) {
GeoFilter geoFilter = this.createGeoFiltersObject(relativePath, action);
if (geoFilter.countryCodes() == null) {
geoFilter.withCountryCodes(new ArrayList<>());
} else {
geoFilter.countryCodes().clear();
}
for (CountryIsoCode countryCode : countryCodes) {
geoFilter.countryCodes().add(countryCode.toString());
}
this.innerModel().geoFilters().add(geoFilter);
return this;
}
@Override
public CdnEndpointImpl withoutGeoFilter(String relativePath) {
this.innerModel().geoFilters().removeIf(geoFilter -> geoFilter.relativePath().equals(relativePath));
return this;
}
@Override
public CdnEndpointImpl withCustomDomain(String hostName) {
this.customDomainList.add(new CustomDomainInner().withHostname(hostName));
return this;
}
@Override
@SuppressWarnings("unchecked")
public CdnStandardRulesEngineRuleImpl defineNewStandardRulesEngineRule(String name) {
if (!isStandardMicrosoftSku()) {
throw new IllegalStateException(String.format("Standard rules engine only supports for Standard Microsoft SKU, "
+ "current SKU is %s", parent().sku().name()));
}
CdnStandardRulesEngineRuleImpl deliveryRule = new CdnStandardRulesEngineRuleImpl(this, name);
this.standardRulesEngineRuleMap.put(name, deliveryRule.innerModel());
return deliveryRule;
}
@Override
@SuppressWarnings("unchecked")
public CdnStandardRulesEngineRuleImpl updateStandardRulesEngineRule(String name) {
if (!isStandardMicrosoftSku()) {
throw new IllegalStateException(String.format("Standard rules engine only supports for Standard Microsoft SKU, "
+ "current SKU is %s", parent().sku().name()));
}
return new CdnStandardRulesEngineRuleImpl(this, standardRulesEngineRules().get(name));
}
@Override
public CdnEndpointImpl withoutStandardRulesEngineRule(String name) {
if (!isStandardMicrosoftSku()) {
throw new IllegalStateException(String.format("Standard rules engine only supports for Standard Microsoft SKU, "
+ "current SKU is %s", parent().sku().name()));
}
this.standardRulesEngineRuleMap.remove(name);
return this;
}
@Override
public CdnEndpointImpl withoutCustomDomain(String hostName) {
deletedCustomDomainList.add(new CustomDomainInner().withHostname(hostName));
return this;
}
private GeoFilter createGeoFiltersObject(String relativePath, GeoFilterActions action) {
if (this.innerModel().geoFilters() == null) {
this.innerModel().withGeoFilters(new ArrayList<>());
}
GeoFilter geoFilter = null;
for (GeoFilter filter : this.innerModel().geoFilters()) {
if (filter.relativePath().equals(relativePath)) {
geoFilter = filter;
break;
}
}
if (geoFilter == null) {
geoFilter = new GeoFilter();
} else {
this.innerModel().geoFilters().remove(geoFilter);
}
geoFilter.withRelativePath(relativePath)
.withAction(action);
return geoFilter;
}
private void initializeRuleMapForStandardMicrosoftSku() {
standardRulesEngineRuleMap.clear();
if (isStandardMicrosoftSku()
&& innerModel().deliveryPolicy() != null
&& innerModel().deliveryPolicy().rules() != null) {
for (DeliveryRule rule : innerModel().deliveryPolicy().rules()) {
this.standardRulesEngineRuleMap.put(rule.name(), rule);
}
}
}
private boolean isStandardMicrosoftSku() {
return SkuName.STANDARD_MICROSOFT.equals(parent().sku().name());
}
}
|
class CdnEndpointImpl
extends ExternalChildResourceImpl<
CdnEndpoint,
EndpointInner,
CdnProfileImpl,
CdnProfile>
implements CdnEndpoint,
CdnEndpoint.DefinitionStages.Blank.StandardEndpoint<CdnProfile.DefinitionStages.WithStandardCreate>,
CdnEndpoint.DefinitionStages.Blank.PremiumEndpoint<CdnProfile.DefinitionStages.WithPremiumVerizonCreate>,
CdnEndpoint.DefinitionStages.WithStandardAttach<CdnProfile.DefinitionStages.WithStandardCreate>,
CdnEndpoint.DefinitionStages.WithPremiumAttach<CdnProfile.DefinitionStages.WithPremiumVerizonCreate>,
CdnEndpoint.UpdateDefinitionStages.Blank.StandardEndpoint<CdnProfile.Update>,
CdnEndpoint.UpdateDefinitionStages.Blank.PremiumEndpoint<CdnProfile.Update>,
CdnEndpoint.UpdateDefinitionStages.WithStandardAttach<CdnProfile.Update>,
CdnEndpoint.UpdateDefinitionStages.WithPremiumAttach<CdnProfile.Update>,
CdnEndpoint.UpdateStandardEndpoint,
CdnEndpoint.UpdatePremiumEndpoint {
private List<CustomDomainInner> customDomainList;
private List<CustomDomainInner> deletedCustomDomainList;
private final Map<String, DeliveryRule> standardRulesEngineRuleMap = new HashMap<>();
CdnEndpointImpl(String name, CdnProfileImpl parent, EndpointInner inner) {
super(name, parent, inner);
this.customDomainList = new ArrayList<>();
this.deletedCustomDomainList = new ArrayList<>();
initializeRuleMapForStandardMicrosoftSku();
}
@Override
public String id() {
return this.innerModel().id();
}
@Override
public Mono<CdnEndpoint> createResourceAsync() {
final CdnEndpointImpl self = this;
if (isStandardMicrosoftSku()
&& this.innerModel().deliveryPolicy() == null
&& this.standardRulesEngineRuleMap.size() > 0) {
this.innerModel().withDeliveryPolicy(new EndpointPropertiesUpdateParametersDeliveryPolicy()
.withRules(this.standardRulesEngineRuleMap.values()
.stream()
.sorted(Comparator.comparingInt(DeliveryRule::order))
.collect(Collectors.toList())));
}
return this.parent().manager().serviceClient().getEndpoints().createAsync(this.parent().resourceGroupName(),
this.parent().name(),
this.name(),
this.innerModel())
.flatMap(inner -> {
self.setInner(inner);
return Flux.fromIterable(self.customDomainList)
.flatMapDelayError(customDomainInner -> self.parent().manager().serviceClient()
.getCustomDomains().createAsync(
self.parent().resourceGroupName(),
self.parent().name(),
self.name(),
self.parent().manager().resourceManager().internalContext()
.randomResourceName("CustomDomain", 50),
new CustomDomainParameters().withHostname(customDomainInner.hostname())), 32, 32)
.then(self.parent().manager().serviceClient()
.getCustomDomains().listByEndpointAsync(
self.parent().resourceGroupName(),
self.parent().name(),
self.name())
.collectList()
.map(customDomainInners -> {
self.customDomainList.addAll(customDomainInners);
return self;
}));
});
}
    @Override
public Mono<Void> deleteResourceAsync() {
return this.parent().manager().serviceClient().getEndpoints().deleteAsync(this.parent().resourceGroupName(),
this.parent().name(),
this.name());
}
@Override
public Mono<CdnEndpoint> refreshAsync() {
final CdnEndpointImpl self = this;
return super.refreshAsync()
.flatMap(cdnEndpoint -> {
self.customDomainList.clear();
self.deletedCustomDomainList.clear();
initializeRuleMapForStandardMicrosoftSku();
return self.parent().manager().serviceClient().getCustomDomains().listByEndpointAsync(
self.parent().resourceGroupName(),
self.parent().name(),
self.name()
)
.collectList()
.map(customDomainInners -> {
self.customDomainList.addAll(customDomainInners);
return self;
});
});
}
@Override
protected Mono<EndpointInner> getInnerAsync() {
return this.parent().manager().serviceClient().getEndpoints().getAsync(this.parent().resourceGroupName(),
this.parent().name(),
this.name());
}
@Override
public PagedIterable<ResourceUsage> listResourceUsage() {
return PagedConverter.mapPage(this.parent().manager().serviceClient().getEndpoints().listResourceUsage(
this.parent().resourceGroupName(),
this.parent().name(),
this.name()),
ResourceUsage::new);
}
@Override
public Map<String, DeliveryRule> standardRulesEngineRules() {
return Collections.unmodifiableMap(this.standardRulesEngineRuleMap);
}
@Override
public CdnProfileImpl attach() {
return this.parent();
}
@Override
public String originHostHeader() {
return this.innerModel().originHostHeader();
}
@Override
public String originPath() {
return this.innerModel().originPath();
}
@Override
public Set<String> contentTypesToCompress() {
List<String> contentTypes = this.innerModel().contentTypesToCompress();
Set<String> set = new HashSet<>();
if (contentTypes != null) {
set.addAll(contentTypes);
}
return Collections.unmodifiableSet(set);
}
@Override
public boolean isCompressionEnabled() {
return this.innerModel().isCompressionEnabled();
}
@Override
public boolean isHttpAllowed() {
return this.innerModel().isHttpAllowed();
}
@Override
public boolean isHttpsAllowed() {
return this.innerModel().isHttpsAllowed();
}
@Override
public QueryStringCachingBehavior queryStringCachingBehavior() {
return this.innerModel().queryStringCachingBehavior();
}
@Override
public String optimizationType() {
if (this.innerModel().optimizationType() == null) {
return null;
}
return this.innerModel().optimizationType().toString();
}
@Override
public List<GeoFilter> geoFilters() {
return this.innerModel().geoFilters();
}
@Override
public String hostname() {
return this.innerModel().hostname();
}
@Override
public EndpointResourceState resourceState() {
return this.innerModel().resourceState();
}
@Override
public String provisioningState() {
return this.innerModel().provisioningState() == null ? null : this.innerModel().provisioningState().toString();
}
@Override
public String originHostName() {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
return this.innerModel().origins().get(0).hostname();
}
return null;
}
@Override
public int httpPort() {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
Integer httpPort = this.innerModel().origins().get(0).httpPort();
return (httpPort != null) ? httpPort : 0;
}
return 0;
}
@Override
public int httpsPort() {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
Integer httpsPort = this.innerModel().origins().get(0).httpsPort();
return (httpsPort != null) ? httpsPort : 0;
}
return 0;
}
@Override
public Set<String> customDomains() {
Set<String> set = new HashSet<>();
for (CustomDomainInner customDomainInner : this.parent().manager().serviceClient().getCustomDomains()
.listByEndpoint(this.parent().resourceGroupName(), this.parent().name(), this.name())) {
set.add(customDomainInner.hostname());
}
return Collections.unmodifiableSet(set);
}
@Override
public void start() {
this.parent().startEndpoint(this.name());
}
@Override
public Mono<Void> startAsync() {
return this.parent().startEndpointAsync(this.name());
}
@Override
public void stop() {
this.stopAsync().block();
}
@Override
public Mono<Void> stopAsync() {
return this.parent().stopEndpointAsync(this.name());
}
@Override
public void purgeContent(Set<String> contentPaths) {
if (contentPaths != null) {
this.purgeContentAsync(contentPaths).block();
}
}
@Override
public Mono<Void> purgeContentAsync(Set<String> contentPaths) {
return this.parent().purgeEndpointContentAsync(this.name(), contentPaths);
}
@Override
public void loadContent(Set<String> contentPaths) {
this.loadContentAsync(contentPaths).block();
}
@Override
public Mono<Void> loadContentAsync(Set<String> contentPaths) {
return this.parent().loadEndpointContentAsync(this.name(), contentPaths);
}
@Override
public CustomDomainValidationResult validateCustomDomain(String hostName) {
return this.validateCustomDomainAsync(hostName).block();
}
@Override
public Mono<CustomDomainValidationResult> validateCustomDomainAsync(String hostName) {
return this.parent().validateEndpointCustomDomainAsync(this.name(), hostName);
}
@Override
public CdnEndpointImpl withOrigin(String originName, String hostname) {
this.innerModel().origins().add(
new DeepCreatedOrigin()
.withName(originName)
.withHostname(hostname));
return this;
}
@Override
public CdnEndpointImpl withOrigin(String hostname) {
return this.withOrigin("origin", hostname);
}
@Override
public CdnEndpointImpl withPremiumOrigin(String originName, String hostname) {
return this.withOrigin(originName, hostname);
}
@Override
public CdnEndpointImpl withPremiumOrigin(String hostname) {
return this.withOrigin(hostname);
}
@Override
public CdnEndpointImpl withOriginPath(String originPath) {
this.innerModel().withOriginPath(originPath);
return this;
}
@Override
public CdnEndpointImpl withHttpAllowed(boolean httpAllowed) {
this.innerModel().withIsHttpAllowed(httpAllowed);
return this;
}
@Override
public CdnEndpointImpl withHttpsAllowed(boolean httpsAllowed) {
this.innerModel().withIsHttpsAllowed(httpsAllowed);
return this;
}
@Override
public CdnEndpointImpl withHttpPort(int httpPort) {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
this.innerModel().origins().get(0).withHttpPort(httpPort);
}
return this;
}
@Override
public CdnEndpointImpl withHttpsPort(int httpsPort) {
if (this.innerModel().origins() != null && !this.innerModel().origins().isEmpty()) {
this.innerModel().origins().get(0).withHttpsPort(httpsPort);
}
return this;
}
@Override
public CdnEndpointImpl withHostHeader(String hostHeader) {
this.innerModel().withOriginHostHeader(hostHeader);
return this;
}
@Override
public CdnEndpointImpl withContentTypesToCompress(Set<String> contentTypesToCompress) {
List<String> list = null;
if (contentTypesToCompress != null) {
list = new ArrayList<>(contentTypesToCompress);
}
this.innerModel().withContentTypesToCompress(list);
return this;
}
@Override
public CdnEndpointImpl withoutContentTypesToCompress() {
if (this.innerModel().contentTypesToCompress() != null) {
this.innerModel().contentTypesToCompress().clear();
}
return this;
}
@Override
public CdnEndpointImpl withContentTypeToCompress(String contentTypeToCompress) {
if (this.innerModel().contentTypesToCompress() == null) {
this.innerModel().withContentTypesToCompress(new ArrayList<>());
}
this.innerModel().contentTypesToCompress().add(contentTypeToCompress);
return this;
}
@Override
public CdnEndpointImpl withoutContentTypeToCompress(String contentTypeToCompress) {
if (this.innerModel().contentTypesToCompress() != null) {
this.innerModel().contentTypesToCompress().remove(contentTypeToCompress);
}
return this;
}
@Override
public CdnEndpointImpl withCompressionEnabled(boolean compressionEnabled) {
this.innerModel().withIsCompressionEnabled(compressionEnabled);
return this;
}
@Override
public CdnEndpointImpl withQueryStringCachingBehavior(QueryStringCachingBehavior cachingBehavior) {
this.innerModel().withQueryStringCachingBehavior(cachingBehavior);
return this;
}
@Override
public CdnEndpointImpl withGeoFilters(Collection<GeoFilter> geoFilters) {
List<GeoFilter> list = null;
if (geoFilters != null) {
list = new ArrayList<>(geoFilters);
}
this.innerModel().withGeoFilters(list);
return this;
}
@Override
public CdnEndpointImpl withoutGeoFilters() {
if (this.innerModel().geoFilters() != null) {
this.innerModel().geoFilters().clear();
}
return this;
}
@Override
public CdnEndpointImpl withGeoFilter(String relativePath, GeoFilterActions action, CountryIsoCode countryCode) {
GeoFilter geoFilter = this.createGeoFiltersObject(relativePath, action);
if (geoFilter.countryCodes() == null) {
geoFilter.withCountryCodes(new ArrayList<>());
}
geoFilter.countryCodes().add(countryCode.toString());
this.innerModel().geoFilters().add(geoFilter);
return this;
}
@Override
public CdnEndpointImpl withGeoFilter(
String relativePath, GeoFilterActions action, Collection<CountryIsoCode> countryCodes) {
GeoFilter geoFilter = this.createGeoFiltersObject(relativePath, action);
if (geoFilter.countryCodes() == null) {
geoFilter.withCountryCodes(new ArrayList<>());
} else {
geoFilter.countryCodes().clear();
}
for (CountryIsoCode countryCode : countryCodes) {
geoFilter.countryCodes().add(countryCode.toString());
}
this.innerModel().geoFilters().add(geoFilter);
return this;
}
@Override
public CdnEndpointImpl withoutGeoFilter(String relativePath) {
this.innerModel().geoFilters().removeIf(geoFilter -> geoFilter.relativePath().equals(relativePath));
return this;
}
@Override
public CdnEndpointImpl withCustomDomain(String hostName) {
this.customDomainList.add(new CustomDomainInner().withHostname(hostName));
return this;
}
@Override
public CdnStandardRulesEngineRuleImpl defineNewStandardRulesEngineRule(String name) {
throwIfNotStandardMicrosoftSku();
CdnStandardRulesEngineRuleImpl deliveryRule = new CdnStandardRulesEngineRuleImpl(this, name);
this.standardRulesEngineRuleMap.put(name, deliveryRule.innerModel());
return deliveryRule;
}
@Override
public CdnStandardRulesEngineRuleImpl updateStandardRulesEngineRule(String name) {
throwIfNotStandardMicrosoftSku();
return new CdnStandardRulesEngineRuleImpl(this, standardRulesEngineRules().get(name));
}
@Override
public CdnEndpointImpl withoutStandardRulesEngineRule(String name) {
throwIfNotStandardMicrosoftSku();
this.standardRulesEngineRuleMap.remove(name);
return this;
}
@Override
public CdnEndpointImpl withoutCustomDomain(String hostName) {
deletedCustomDomainList.add(new CustomDomainInner().withHostname(hostName));
return this;
}
private GeoFilter createGeoFiltersObject(String relativePath, GeoFilterActions action) {
if (this.innerModel().geoFilters() == null) {
this.innerModel().withGeoFilters(new ArrayList<>());
}
GeoFilter geoFilter = null;
for (GeoFilter filter : this.innerModel().geoFilters()) {
if (filter.relativePath().equals(relativePath)) {
geoFilter = filter;
break;
}
}
if (geoFilter == null) {
geoFilter = new GeoFilter();
} else {
this.innerModel().geoFilters().remove(geoFilter);
}
geoFilter.withRelativePath(relativePath)
.withAction(action);
return geoFilter;
}
private void initializeRuleMapForStandardMicrosoftSku() {
standardRulesEngineRuleMap.clear();
if (isStandardMicrosoftSku()
&& innerModel().deliveryPolicy() != null
&& innerModel().deliveryPolicy().rules() != null) {
for (DeliveryRule rule : innerModel().deliveryPolicy().rules()) {
this.standardRulesEngineRuleMap.put(rule.name(), rule);
}
}
}
private boolean isStandardMicrosoftSku() {
return SkuName.STANDARD_MICROSOFT.equals(parent().sku().name());
}
private void throwIfNotStandardMicrosoftSku() {
if (!isStandardMicrosoftSku()) {
throw new IllegalStateException(String.format(
"Standard rules engine only supports for Standard Microsoft SKU, "
+ "current SKU is %s", parent().sku().name()));
}
}
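    // Lazily initialize the delivery policy so callers can assume it is non-null.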
private void ensureDeliveryPolicy() {
if (innerModel().deliveryPolicy() == null) {
innerModel().withDeliveryPolicy(new EndpointPropertiesUpdateParametersDeliveryPolicy());
}
}
}
|
Is `serviceRegistry.id()` guaranteed to be non-null?
|
public boolean hasServiceRegistryBinding() {
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
return false;
}
SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry();
if (serviceRegistry == null) {
return false;
}
return addonConfigs.get(Constants.SERVICE_REGISTRY_KEY) != null
&& serviceRegistry.id().equalsIgnoreCase((String) addonConfigs.get(Constants.SERVICE_REGISTRY_KEY).get(Constants.BINDING_RESOURCE_ID));
}
|
&& serviceRegistry.id().equalsIgnoreCase((String) addonConfigs.get(Constants.SERVICE_REGISTRY_KEY).get(Constants.BINDING_RESOURCE_ID));
|
public boolean hasServiceRegistryBinding() {
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
return false;
}
SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry();
if (serviceRegistry == null) {
return false;
}
return addonConfigs.get(Constants.SERVICE_REGISTRY_KEY) != null
&& serviceRegistry.id().equalsIgnoreCase((String) addonConfigs.get(Constants.SERVICE_REGISTRY_KEY).get(Constants.BINDING_RESOURCE_ID));
}
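If the SDK does not guarantee a non-null id for a retrieved service registry (an assumption; it may well do so for any resource read back from the service), a minimal null-safe sketch of the final comparison could look like the following. `serviceRegistry` and the `Constants` keys are the ones used above; only the comparison changes.
        // Hypothetical null-safe variant of the return statement above (a sketch,
        // not the SDK's code). equalsIgnoreCase(null) returns false, so the only
        // remaining hazard is a null serviceRegistry.id(), guarded explicitly.
        Map<String, Object> serviceRegistryConfigs = addonConfigs.get(Constants.SERVICE_REGISTRY_KEY);
        String boundResourceId = serviceRegistryConfigs == null
            ? null
            : (String) serviceRegistryConfigs.get(Constants.BINDING_RESOURCE_ID);
        return serviceRegistry.id() != null && serviceRegistry.id().equalsIgnoreCase(boundResourceId);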
|
class SpringAppImpl
extends ExternalChildResourceImpl<SpringApp, AppResourceInner, SpringServiceImpl, SpringService>
implements SpringApp, SpringApp.Definition, SpringApp.Update {
private Creatable<SpringAppDeployment> springAppDeploymentToCreate = null;
private final SpringAppDeploymentsImpl deployments = new SpringAppDeploymentsImpl(this);
private final SpringAppServiceBindingsImpl serviceBindings = new SpringAppServiceBindingsImpl(this);
private final SpringAppDomainsImpl domains = new SpringAppDomainsImpl(this);
private FunctionalTaskItem setActiveDeploymentTask = null;
SpringAppImpl(String name, SpringServiceImpl parent, AppResourceInner innerObject) {
super(name, parent, innerObject);
}
@Override
public boolean isPublic() {
if (innerModel().properties() == null) {
return false;
}
return innerModel().properties().publicProperty();
}
@Override
public boolean isHttpsOnly() {
if (innerModel().properties() == null) {
return false;
}
return innerModel().properties().httpsOnly();
}
@Override
public String url() {
if (innerModel().properties() == null) {
return null;
}
return innerModel().properties().url();
}
@Override
public String fqdn() {
if (innerModel().properties() == null) {
return null;
}
return innerModel().properties().fqdn();
}
@Override
public TemporaryDisk temporaryDisk() {
if (innerModel().properties() == null) {
return null;
}
return innerModel().properties().temporaryDisk();
}
@Override
public PersistentDisk persistentDisk() {
if (innerModel().properties() == null) {
return null;
}
return innerModel().properties().persistentDisk();
}
@Override
public ManagedIdentityProperties identity() {
return innerModel().identity();
}
@Override
public String activeDeploymentName() {
Optional<SpringAppDeployment> deployment = deployments.list().stream().filter(SpringAppDeployment::isActive).findFirst();
return deployment.map(SpringAppDeployment::appName).orElse(null);
}
@Override
public SpringAppDeployment getActiveDeployment() {
return getActiveDeploymentAsync().block();
}
@Override
public Mono<SpringAppDeployment> getActiveDeploymentAsync() {
return deployments.listAsync().filter(SpringAppDeployment::isActive).singleOrEmpty();
}
@Override
@SuppressWarnings("unchecked")
public <T extends SpringAppDeployment.DefinitionStages.WithCreate<T>> SpringAppDeployments<T> deployments() {
return (SpringAppDeployments<T>) deployments;
}
@Override
public SpringAppServiceBindings serviceBindings() {
return serviceBindings;
}
@Override
public SpringAppDomains customDomains() {
return domains;
}
@Override
public Mono<ResourceUploadDefinition> getResourceUploadUrlAsync() {
return manager().serviceClient().getApps().getResourceUploadUrlAsync(
parent().resourceGroupName(), parent().name(), name());
}
@Override
public ResourceUploadDefinition getResourceUploadUrl() {
return getResourceUploadUrlAsync().block();
}
private void ensureProperty() {
if (innerModel().properties() == null) {
innerModel().withProperties(new AppResourceProperties());
}
}
@Override
public boolean hasConfigurationServiceBinding() {
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
return false;
}
SpringConfigurationService configurationService = parent().getDefaultConfigurationService();
if (configurationService == null) {
return false;
}
return addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY) != null
&& configurationService.id().equalsIgnoreCase((String) addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY).get(Constants.BINDING_RESOURCE_ID));
}
    @Override
public SpringAppImpl withDefaultPublicEndpoint() {
ensureProperty();
innerModel().properties().withPublicProperty(true);
return this;
}
@Override
public SpringAppImpl withoutDefaultPublicEndpoint() {
ensureProperty();
innerModel().properties().withPublicProperty(false);
return this;
}
@Override
public SpringAppImpl withCustomDomain(String domain) {
domains.prepareCreateOrUpdate(domain, new CustomDomainProperties());
return this;
}
@Override
public SpringAppImpl withCustomDomain(String domain, String certThumbprint) {
domains.prepareCreateOrUpdate(domain, new CustomDomainProperties().withThumbprint(certThumbprint));
return this;
}
@Override
public Update withoutCustomDomain(String domain) {
domains.prepareDelete(domain);
return this;
}
@Override
public SpringAppImpl withHttpsOnly() {
ensureProperty();
innerModel().properties().withHttpsOnly(true);
return this;
}
@Override
public SpringAppImpl withoutHttpsOnly() {
ensureProperty();
innerModel().properties().withHttpsOnly(false);
return this;
}
@Override
public SpringAppImpl withTemporaryDisk(int sizeInGB, String mountPath) {
ensureProperty();
innerModel().properties().withTemporaryDisk(
new TemporaryDisk().withSizeInGB(sizeInGB).withMountPath(mountPath));
return this;
}
@Override
public SpringAppImpl withPersistentDisk(int sizeInGB, String mountPath) {
ensureProperty();
innerModel().properties().withPersistentDisk(
new PersistentDisk().withSizeInGB(sizeInGB).withMountPath(mountPath));
return this;
}
@Override
public SpringAppImpl withActiveDeployment(String name) {
if (CoreUtils.isNullOrEmpty(name)) {
return this;
}
this.setActiveDeploymentTask =
context -> manager().serviceClient().getApps()
.setActiveDeploymentsAsync(parent().resourceGroupName(), parent().name(), name(), new ActiveDeploymentCollection().withActiveDeploymentNames(Arrays.asList(name)))
.then(context.voidMono());
return this;
}
@Override
public void beforeGroupCreateOrUpdate() {
if (setActiveDeploymentTask != null) {
this.addPostRunDependent(setActiveDeploymentTask);
}
setActiveDeploymentTask = null;
}
@Override
public Mono<SpringApp> createResourceAsync() {
if (springAppDeploymentToCreate == null) {
withDefaultActiveDeployment();
}
return manager().serviceClient().getApps().createOrUpdateAsync(
parent().resourceGroupName(), parent().name(), name(), new AppResourceInner())
.thenMany(springAppDeploymentToCreate.createAsync())
.then(updateResourceAsync());
}
@Override
public Mono<SpringApp> updateResourceAsync() {
return manager().serviceClient().getApps().updateAsync(
parent().resourceGroupName(), parent().name(), name(), innerModel())
.map(inner -> {
setInner(inner);
return this;
});
}
@Override
public Mono<Void> deleteResourceAsync() {
return manager().serviceClient().getApps().deleteAsync(parent().resourceGroupName(), parent().name(), name());
}
@Override
protected Mono<AppResourceInner> getInnerAsync() {
return manager().serviceClient().getApps().getAsync(parent().resourceGroupName(), parent().name(), name());
}
@Override
public String id() {
return innerModel().id();
}
@Override
public SpringAppImpl update() {
prepareUpdate();
return this;
}
public AppPlatformManager manager() {
return parent().manager();
}
@Override
public SpringAppImpl withServiceBinding(String name, BindingResourceProperties bindingProperties) {
serviceBindings.prepareCreateOrUpdate(name, bindingProperties);
return this;
}
@Override
public SpringAppImpl withoutServiceBinding(String name) {
serviceBindings.prepareDelete(name);
return this;
}
@Override
public SpringAppImpl withDefaultActiveDeployment() {
String defaultDeploymentName = "default";
withActiveDeployment(defaultDeploymentName);
springAppDeploymentToCreate = deployments().define(defaultDeploymentName)
.withExistingSource(UserSourceType.JAR, String.format("<%s>", defaultDeploymentName));
return this;
}
@Override
@SuppressWarnings("unchecked")
public <T extends
SpringAppDeployment.DefinitionStages.WithAttach<? extends SpringApp.DefinitionStages.WithCreate, T>>
SpringAppDeployment.DefinitionStages.Blank<T> defineActiveDeployment(String name) {
return (SpringAppDeployment.DefinitionStages.Blank<T>) deployments.define(name);
}
SpringAppImpl addActiveDeployment(SpringAppDeploymentImpl deployment) {
withActiveDeployment(deployment.name());
springAppDeploymentToCreate = deployment;
return this;
}
@Override
public SpringAppImpl withConfigurationServiceBinding() {
ensureProperty();
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
addonConfigs = new HashMap<>();
innerModel().properties().withAddonConfigs(addonConfigs);
}
SpringConfigurationService configurationService = parent().getDefaultConfigurationService();
if (configurationService != null) {
Map<String, Object> configurationServiceConfigs = addonConfigs.computeIfAbsent(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY, k -> new HashMap<>());
configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, configurationService.id());
}
return this;
}
@Override
public SpringAppImpl withoutConfigurationServiceBinding() {
if (innerModel().properties() == null) {
return this;
}
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
return this;
}
Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY);
if (configurationServiceConfigs == null) {
return this;
}
configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, "");
return this;
}
@Override
public SpringAppImpl withServiceRegistryBinding() {
ensureProperty();
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
addonConfigs = new HashMap<>();
innerModel().properties().withAddonConfigs(addonConfigs);
}
SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry();
if (serviceRegistry != null) {
Map<String, Object> serviceRegistryConfigs = addonConfigs.computeIfAbsent(Constants.SERVICE_REGISTRY_KEY, k -> new HashMap<>());
serviceRegistryConfigs.put(Constants.BINDING_RESOURCE_ID, serviceRegistry.id());
}
return this;
}
@Override
public SpringAppImpl withoutServiceRegistryBinding() {
if (innerModel().properties() == null) {
return this;
}
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
return this;
}
Map<String, Object> serviceRegistryConfigs = addonConfigs.get(Constants.SERVICE_REGISTRY_KEY);
if (serviceRegistryConfigs == null) {
return this;
}
serviceRegistryConfigs.put(Constants.BINDING_RESOURCE_ID, "");
return this;
}
}
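// Usage sketch (not part of the SDK source above): a minimal, hedged example of the
// fluent definition flow that SpringAppImpl implements. The resource group, service,
// app, and jar names are hypothetical, an authenticated AppPlatformManager is assumed,
// and imports are omitted to match the surrounding method-body rows.
class SpringAppDefinitionSketch {
    static SpringApp createGatewayApp(AppPlatformManager manager) {
        SpringService service = manager.springServices()
            .getByResourceGroup("my-rg", "my-spring-service"); // hypothetical names
        return service.apps().define("gateway")
            .defineActiveDeployment("default") // registered as active via addActiveDeployment(...)
                .withJarFile(new java.io.File("target/gateway.jar")) // hypothetical artifact path
                .attach()
            .withDefaultPublicEndpoint() // sets properties().withPublicProperty(true)
            .withHttpsOnly()
            .create();
    }
}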
|
class SpringAppImpl
extends ExternalChildResourceImpl<SpringApp, AppResourceInner, SpringServiceImpl, SpringService>
implements SpringApp, SpringApp.Definition, SpringApp.Update {
private Creatable<SpringAppDeployment> springAppDeploymentToCreate = null;
private final SpringAppDeploymentsImpl deployments = new SpringAppDeploymentsImpl(this);
private final SpringAppServiceBindingsImpl serviceBindings = new SpringAppServiceBindingsImpl(this);
private final SpringAppDomainsImpl domains = new SpringAppDomainsImpl(this);
private FunctionalTaskItem setActiveDeploymentTask = null;
SpringAppImpl(String name, SpringServiceImpl parent, AppResourceInner innerObject) {
super(name, parent, innerObject);
}
@Override
public boolean isPublic() {
if (innerModel().properties() == null) {
return false;
}
return innerModel().properties().publicProperty();
}
@Override
public boolean isHttpsOnly() {
if (innerModel().properties() == null) {
return false;
}
return innerModel().properties().httpsOnly();
}
@Override
public String url() {
if (innerModel().properties() == null) {
return null;
}
return innerModel().properties().url();
}
@Override
public String fqdn() {
if (innerModel().properties() == null) {
return null;
}
return innerModel().properties().fqdn();
}
@Override
public TemporaryDisk temporaryDisk() {
if (innerModel().properties() == null) {
return null;
}
return innerModel().properties().temporaryDisk();
}
@Override
public PersistentDisk persistentDisk() {
if (innerModel().properties() == null) {
return null;
}
return innerModel().properties().persistentDisk();
}
@Override
public ManagedIdentityProperties identity() {
return innerModel().identity();
}
@Override
public String activeDeploymentName() {
Optional<SpringAppDeployment> deployment = deployments.list().stream().filter(SpringAppDeployment::isActive).findFirst();
return deployment.map(SpringAppDeployment::name).orElse(null);
}
@Override
public SpringAppDeployment getActiveDeployment() {
return getActiveDeploymentAsync().block();
}
@Override
public Mono<SpringAppDeployment> getActiveDeploymentAsync() {
return deployments.listAsync().filter(SpringAppDeployment::isActive).singleOrEmpty();
}
@Override
@SuppressWarnings("unchecked")
public <T extends SpringAppDeployment.DefinitionStages.WithCreate<T>> SpringAppDeployments<T> deployments() {
return (SpringAppDeployments<T>) deployments;
}
@Override
public SpringAppServiceBindings serviceBindings() {
return serviceBindings;
}
@Override
public SpringAppDomains customDomains() {
return domains;
}
@Override
public Mono<ResourceUploadDefinition> getResourceUploadUrlAsync() {
return manager().serviceClient().getApps().getResourceUploadUrlAsync(
parent().resourceGroupName(), parent().name(), name());
}
@Override
public ResourceUploadDefinition getResourceUploadUrl() {
return getResourceUploadUrlAsync().block();
}
private void ensureProperty() {
if (innerModel().properties() == null) {
innerModel().withProperties(new AppResourceProperties());
}
}
@Override
public boolean hasConfigurationServiceBinding() {
if (innerModel().properties() == null) {
return false;
}
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
return false;
}
SpringConfigurationService configurationService = parent().getDefaultConfigurationService();
if (configurationService == null) {
return false;
}
return addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY) != null
&& configurationService.id().equalsIgnoreCase((String) addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY).get(Constants.BINDING_RESOURCE_ID));
}
@Override
public SpringAppImpl withDefaultPublicEndpoint() {
ensureProperty();
innerModel().properties().withPublicProperty(true);
return this;
}
@Override
public SpringAppImpl withoutDefaultPublicEndpoint() {
ensureProperty();
innerModel().properties().withPublicProperty(false);
return this;
}
@Override
public SpringAppImpl withCustomDomain(String domain) {
domains.prepareCreateOrUpdate(domain, new CustomDomainProperties());
return this;
}
@Override
public SpringAppImpl withCustomDomain(String domain, String certThumbprint) {
domains.prepareCreateOrUpdate(domain, new CustomDomainProperties().withThumbprint(certThumbprint));
return this;
}
@Override
public Update withoutCustomDomain(String domain) {
domains.prepareDelete(domain);
return this;
}
@Override
public SpringAppImpl withHttpsOnly() {
ensureProperty();
innerModel().properties().withHttpsOnly(true);
return this;
}
@Override
public SpringAppImpl withoutHttpsOnly() {
ensureProperty();
innerModel().properties().withHttpsOnly(false);
return this;
}
@Override
public SpringAppImpl withTemporaryDisk(int sizeInGB, String mountPath) {
ensureProperty();
innerModel().properties().withTemporaryDisk(
new TemporaryDisk().withSizeInGB(sizeInGB).withMountPath(mountPath));
return this;
}
@Override
public SpringAppImpl withPersistentDisk(int sizeInGB, String mountPath) {
ensureProperty();
innerModel().properties().withPersistentDisk(
new PersistentDisk().withSizeInGB(sizeInGB).withMountPath(mountPath));
return this;
}
@Override
public SpringAppImpl withActiveDeployment(String name) {
if (CoreUtils.isNullOrEmpty(name)) {
return this;
}
this.setActiveDeploymentTask =
context -> manager().serviceClient().getApps()
.setActiveDeploymentsAsync(parent().resourceGroupName(), parent().name(), name(), new ActiveDeploymentCollection().withActiveDeploymentNames(Arrays.asList(name)))
.then(context.voidMono());
return this;
}
@Override
public void beforeGroupCreateOrUpdate() {
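// Register the deferred set-active-deployment task as a post-run dependent so it
// executes only after the app and its deployment exist, then clear the field so
// the task cannot be registered twice on a later create/update cycle.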
if (setActiveDeploymentTask != null) {
this.addPostRunDependent(setActiveDeploymentTask);
}
setActiveDeploymentTask = null;
}
@Override
public Mono<SpringApp> createResourceAsync() {
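// Create the app shell with an empty inner model first (the service requires the
// app to exist before a deployment can be attached), fall back to a default JAR
// deployment when none was defined, create that deployment, and finally push the
// fully populated inner model via updateResourceAsync().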
if (springAppDeploymentToCreate == null) {
withDefaultActiveDeployment();
}
return manager().serviceClient().getApps().createOrUpdateAsync(
parent().resourceGroupName(), parent().name(), name(), new AppResourceInner())
.thenMany(springAppDeploymentToCreate.createAsync())
.then(updateResourceAsync());
}
@Override
public Mono<SpringApp> updateResourceAsync() {
return manager().serviceClient().getApps().updateAsync(
parent().resourceGroupName(), parent().name(), name(), innerModel())
.map(inner -> {
setInner(inner);
return this;
});
}
@Override
public Mono<Void> deleteResourceAsync() {
return manager().serviceClient().getApps().deleteAsync(parent().resourceGroupName(), parent().name(), name());
}
@Override
protected Mono<AppResourceInner> getInnerAsync() {
return manager().serviceClient().getApps().getAsync(parent().resourceGroupName(), parent().name(), name());
}
@Override
public String id() {
return innerModel().id();
}
@Override
public SpringAppImpl update() {
prepareUpdate();
return this;
}
public AppPlatformManager manager() {
return parent().manager();
}
@Override
public SpringAppImpl withServiceBinding(String name, BindingResourceProperties bindingProperties) {
serviceBindings.prepareCreateOrUpdate(name, bindingProperties);
return this;
}
@Override
public SpringAppImpl withoutServiceBinding(String name) {
serviceBindings.prepareDelete(name);
return this;
}
@Override
public SpringAppImpl withDefaultActiveDeployment() {
String defaultDeploymentName = "default";
withActiveDeployment(defaultDeploymentName);
springAppDeploymentToCreate = deployments().define(defaultDeploymentName)
.withExistingSource(UserSourceType.JAR, String.format("<%s>", defaultDeploymentName));
return this;
}
@Override
@SuppressWarnings("unchecked")
public <T extends
SpringAppDeployment.DefinitionStages.WithAttach<? extends SpringApp.DefinitionStages.WithCreate, T>>
SpringAppDeployment.DefinitionStages.Blank<T> defineActiveDeployment(String name) {
return (SpringAppDeployment.DefinitionStages.Blank<T>) deployments.define(name);
}
SpringAppImpl addActiveDeployment(SpringAppDeploymentImpl deployment) {
withActiveDeployment(deployment.name());
springAppDeploymentToCreate = deployment;
return this;
}
@Override
public SpringAppImpl withConfigurationServiceBinding() {
ensureProperty();
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
addonConfigs = new HashMap<>();
innerModel().properties().withAddonConfigs(addonConfigs);
}
SpringConfigurationService configurationService = parent().getDefaultConfigurationService();
if (configurationService != null) {
Map<String, Object> configurationServiceConfigs = addonConfigs.computeIfAbsent(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY, k -> new HashMap<>());
configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, configurationService.id());
}
return this;
}
@Override
public SpringAppImpl withoutConfigurationServiceBinding() {
if (innerModel().properties() == null) {
return this;
}
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
return this;
}
Map<String, Object> configurationServiceConfigs = addonConfigs.get(Constants.APPLICATION_CONFIGURATION_SERVICE_KEY);
if (configurationServiceConfigs == null) {
return this;
}
configurationServiceConfigs.put(Constants.BINDING_RESOURCE_ID, "");
return this;
}
@Override
public SpringAppImpl withServiceRegistryBinding() {
ensureProperty();
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
addonConfigs = new HashMap<>();
innerModel().properties().withAddonConfigs(addonConfigs);
}
SpringServiceRegistry serviceRegistry = parent().getDefaultServiceRegistry();
if (serviceRegistry != null) {
Map<String, Object> serviceRegistryConfigs = addonConfigs.computeIfAbsent(Constants.SERVICE_REGISTRY_KEY, k -> new HashMap<>());
serviceRegistryConfigs.put(Constants.BINDING_RESOURCE_ID, serviceRegistry.id());
}
return this;
}
@Override
public SpringAppImpl withoutServiceRegistryBinding() {
if (innerModel().properties() == null) {
return this;
}
Map<String, Map<String, Object>> addonConfigs = innerModel().properties().addonConfigs();
if (addonConfigs == null) {
return this;
}
Map<String, Object> serviceRegistryConfigs = addonConfigs.get(Constants.SERVICE_REGISTRY_KEY);
if (serviceRegistryConfigs == null) {
return this;
}
serviceRegistryConfigs.put(Constants.BINDING_RESOURCE_ID, "");
return this;
}
}
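// Hedged sketch (not part of the SDK source above): binding an existing app to the
// tier's default Application Configuration Service and Service Registry through the
// addon-config helpers defined in the class. The app name is hypothetical, the
// getByName accessor is an assumption, and Enterprise-tier components are presumed
// to exist on the parent service.
class SpringAppBindingSketch {
    static SpringApp bindToTanzuComponents(SpringService service) {
        return service.apps().getByName("orders") // hypothetical app
            .update()
            .withConfigurationServiceBinding() // stores the ACS resource id under addonConfigs
            .withServiceRegistryBinding()      // stores the Service Registry resource id
            .apply();
    }
}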
|