project
string | commit_id
string | target
int64 | func
string | cwe
string | big_vul_idx
string | idx
int64 | hash
string | size
float64 | message
string | dataset
string |
|---|---|---|---|---|---|---|---|---|---|---|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: blocks profile-page access to an authentication scheme whose
 * module configuration sets "forbid_user_profile" to true.
 * Responds 403 and stops the callback chain when forbidden, 404 when the
 * scheme is unknown, 500 on lookup error; otherwise lets the chain continue.
 */
int callback_glewlwyd_scheme_check_forbid_profile (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  int callback_ret = U_CALLBACK_CONTINUE;
  json_t * j_body = ulfius_get_json_body_request(request, NULL);
  json_t * j_scheme_module = get_user_auth_scheme_module(config, json_string_value(json_object_get(j_body, "scheme_name")));

  if (check_result_value(j_scheme_module, G_OK)) {
    /* Scheme found: refuse when the module forbids use from the profile page */
    if (json_true() == json_object_get(json_object_get(j_scheme_module, "module"), "forbid_user_profile")) {
      response->status = 403;
      callback_ret = U_CALLBACK_COMPLETE;
    }
  } else if (check_result_value(j_scheme_module, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_scheme_check_forbid_profile - Error auth_register_get_user_scheme");
    response->status = 500;
  }
  json_decref(j_body);
  json_decref(j_scheme_module);
  return callback_ret;
}
| null | null | 219,990
|
323799741102655456172467996294398681740
| 20
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: deletes the scope named in the "scope" URL parameter.
 * 404 when the scope does not exist, 500 on lookup or deletion error,
 * otherwise logs the removal event.
 */
int callback_glewlwyd_delete_scope (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  const char * scope_name = u_map_get(request->map_url, "scope");
  json_t * j_scope = get_scope(config, scope_name);

  if (check_result_value(j_scope, G_OK)) {
    if (delete_scope(config, scope_name) == G_OK) {
      y_log_message(Y_LOG_LEVEL_INFO, "Event - Scope '%s' removed", scope_name);
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_scope - Error delete_scope");
      response->status = 500;
    }
  } else if (check_result_value(j_scope, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_scope - Error get_scope");
    response->status = 500;
  }
  json_decref(j_scope);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,991
|
21879157829563128577889659573080658049
| 21
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: creates a new client from the JSON request body.
 * Validates the body with is_client_valid, rejects a duplicate client_id
 * with 400, and adds the client via add_client on success.
 * 400 on missing/invalid JSON body or validation error, 500 on internal
 * error; the HTTP status is left at its default on success.
 */
int callback_glewlwyd_add_client (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_client, * j_client_valid, * j_search_client, * j_body;
  /* NULL when the request body is not parseable JSON */
  j_client = ulfius_get_json_body_request(request, NULL);
  if (j_client != NULL) {
    /* Validate as a new client (add mode), optionally against a specific backend "source" */
    j_client_valid = is_client_valid(config, NULL, j_client, 1, u_map_get(request->map_url, "source"));
    if (check_result_value(j_client_valid, G_OK)) {
      /* The client_id must not already exist in the target backend */
      j_search_client = get_client(config, json_string_value(json_object_get(j_client, "client_id")), u_map_get(request->map_url, "source"));
      if (check_result_value(j_search_client, G_ERROR_NOT_FOUND)) {
        if (add_client(config, j_client, u_map_get(request->map_url, "source")) != G_OK) {
          y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_add_client - Error add_client");
          response->status = 500;
        } else {
          y_log_message(Y_LOG_LEVEL_INFO, "Event - Client '%s' added", json_string_value(json_object_get(j_client, "client_id")));
        }
      } else if (check_result_value(j_search_client, G_OK)) {
        /* Duplicate client_id: report as a parameter error */
        j_body = json_pack("{s[s]}", "error", "client_id already exists");
        ulfius_set_json_body_response(response, 400, j_body);
        json_decref(j_body);
      } else {
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_add_client - Error get_client");
        response->status = 500;
      }
      json_decref(j_search_client);
    } else if (check_result_value(j_client_valid, G_ERROR_PARAM)) {
      /* Forward the validator's error details when available */
      if (json_object_get(j_client_valid, "error") != NULL) {
        ulfius_set_json_body_response(response, 400, json_object_get(j_client_valid, "error"));
      } else {
        response->status = 400;
      }
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_add_client - Error is_client_valid");
      response->status = 500;
    }
    json_decref(j_client_valid);
  } else {
    response->status = 400;
  }
  json_decref(j_client);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,992
|
190962846730909635851693000695238650571
| 42
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: deletes the user backend module named in the "name" URL
 * parameter. 404 when the module does not exist, 500 on lookup or
 * deletion error, otherwise logs the removal event.
 */
int callback_glewlwyd_delete_user_module (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  const char * module_name = u_map_get(request->map_url, "name");
  json_t * j_module = get_user_module(config, module_name);

  if (check_result_value(j_module, G_OK)) {
    if (delete_user_module(config, module_name) == G_OK) {
      y_log_message(Y_LOG_LEVEL_INFO, "Event - User backend module '%s' removed", module_name);
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_user_module - Error delete_user_module");
      response->status = 500;
    }
  } else if (check_result_value(j_module, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_user_module - Error get_user_module");
    response->status = 500;
  }
  json_decref(j_module);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,993
|
231395336933080766607525163619819774680
| 21
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the registration status/data of an authentication
 * scheme for the currently authenticated user.
 * Expects a JSON body with "username", "scheme_type" and "scheme_name";
 * the username must match (case-insensitive) the session user stored in
 * response->shared_data — this endpoint cannot be used to impersonate.
 * 200 with the scheme's "register" object, 400 on parameter error,
 * 401 unauthorized, 404 unknown scheme, 500 internal error.
 */
int callback_glewlwyd_user_auth_register_get (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_param = ulfius_get_json_body_request(request, NULL), * j_result = NULL;
  if (j_param != NULL) {
    if (json_object_get(j_param, "username") != NULL && json_string_length(json_object_get(j_param, "username"))) {
      /* Only the session's own user may query its scheme registration */
      if (0 == o_strcasecmp(json_string_value(json_object_get((json_t *)response->shared_data, "username")), json_string_value(json_object_get(j_param, "username")))) {
        if (json_object_get(j_param, "scheme_type") != NULL && json_string_length(json_object_get(j_param, "scheme_type")) && json_object_get(j_param, "scheme_name") != NULL && json_string_length(json_object_get(j_param, "scheme_name"))) {
          j_result = auth_register_get_user_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_string_value(json_object_get(j_param, "username")), request);
          if (check_result_value(j_result, G_ERROR_PARAM)) {
            ulfius_set_string_body_response(response, 400, "bad scheme response");
          } else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
            response->status = 404;
          } else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
            response->status = 401;
          } else if (check_result_value(j_result, G_OK)) {
            /* Body is only set when the scheme returned registration data */
            if (json_object_get(j_result, "register") != NULL) {
              ulfius_set_json_body_response(response, 200, json_object_get(j_result, "register"));
            }
          } else {
            y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth_register_get - Error auth_register_get_user_scheme");
            response->status = 500;
          }
          json_decref(j_result);
        } else {
          ulfius_set_string_body_response(response, 400, "scheme is mandatory");
        }
      } else {
        ulfius_set_string_body_response(response, 400, "username invalid");
      }
    } else {
      ulfius_set_string_body_response(response, 400, "username is mandatory");
    }
  } else {
    ulfius_set_string_body_response(response, 400, "Input parameters must be in JSON format");
  }
  json_decref(j_param);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,994
|
169867575768772151482583257885634242259
| 39
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the user identified by the "username" URL parameter,
 * optionally restricted to a backend "source".
 * 200 with the "user" object, 404 when unknown, 500 on error.
 */
int callback_glewlwyd_get_user (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_result = get_user(config, u_map_get(request->map_url, "username"), u_map_get(request->map_url, "source"));

  if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else if (check_result_value(j_result, G_OK)) {
    ulfius_set_json_body_response(response, 200, json_object_get(j_result, "user"));
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_user - Error j_user");
    response->status = 500;
  }
  json_decref(j_result);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,995
|
104573234001727025029555040790853304296
| 16
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the user backend module identified by the "name" URL
 * parameter. 200 with the "module" object, 404 when unknown, 500 on error.
 */
int callback_glewlwyd_get_user_module (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_result = get_user_module(config, u_map_get(request->map_url, "name"));

  if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else if (check_result_value(j_result, G_OK)) {
    ulfius_set_json_body_response(response, 200, json_object_get(j_result, "module"));
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_user_module - Error get_user_module");
    response->status = 500;
  }
  json_decref(j_result);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,996
|
183006171992755318096267505804967036137
| 16
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: triggers an authentication scheme (e.g. to send a code).
 * Expects a JSON body with "scheme_type" and "scheme_name"; when a
 * "username" is present the scheme is triggered for that user, otherwise
 * the scheme is asked to identify the user itself via the optional
 * "value" payload. Both paths map the result identically:
 * 200 with the scheme's "trigger" object, 400 bad scheme response,
 * 401 unauthorized, 404 not found, 500 internal error.
 */
int callback_glewlwyd_user_auth_trigger (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_param = ulfius_get_json_body_request(request, NULL), * j_result = NULL;
  if (j_param != NULL) {
    if (json_string_length(json_object_get(j_param, "scheme_type")) && json_string_length(json_object_get(j_param, "scheme_name"))) {
      if (json_string_length(json_object_get(j_param, "username"))) {
        /* Trigger the scheme for an explicit username */
        j_result = auth_trigger_user_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_string_value(json_object_get(j_param, "username")), json_object_get(j_param, "value"), request);
        if (check_result_value(j_result, G_ERROR_PARAM)) {
          ulfius_set_string_body_response(response, 400, "bad scheme response");
        } else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
          response->status = 404;
        } else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
          response->status = 401;
        } else if (check_result_value(j_result, G_OK)) {
          /* Body is only set when the scheme returned trigger data */
          if (json_object_get(j_result, "trigger") != NULL) {
            ulfius_set_json_body_response(response, 200, json_object_get(j_result, "trigger"));
          }
        } else {
          y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth_trigger - Error auth_trigger_user_scheme");
          response->status = 500;
        }
        json_decref(j_result);
      } else {
        /* No username: let the scheme identify the user from "value" */
        j_result = auth_trigger_identify_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_object_get(j_param, "value"), request);
        if (check_result_value(j_result, G_ERROR_PARAM)) {
          ulfius_set_string_body_response(response, 400, "bad scheme response");
        } else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
          response->status = 404;
        } else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
          response->status = 401;
        } else if (check_result_value(j_result, G_OK)) {
          if (json_object_get(j_result, "trigger") != NULL) {
            ulfius_set_json_body_response(response, 200, json_object_get(j_result, "trigger"));
          }
        } else {
          y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth_trigger - Error auth_trigger_identify_scheme");
          response->status = 500;
        }
        json_decref(j_result);
      }
    } else {
      ulfius_set_string_body_response(response, 400, "scheme is mandatory");
    }
  } else {
    ulfius_set_string_body_response(response, 400, "Input parameters must be in JSON format");
  }
  json_decref(j_param);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,997
|
122848707373272220068112076470629353600
| 50
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the list of available plugin modules.
 * 200 with the "module" array, 500 on error.
 */
int callback_glewlwyd_get_plugin_module_list (const struct _u_request * request, struct _u_response * response, void * plugin_data) {
  UNUSED(request);
  struct config_elements * config = (struct config_elements *)plugin_data;
  json_t * j_list = get_plugin_module_list(config);

  if (!check_result_value(j_list, G_OK)) {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_plugin_module_list - Error get_plugin_module_list");
    response->status = 500;
  } else {
    ulfius_set_json_body_response(response, 200, json_object_get(j_list, "module"));
  }
  json_decref(j_list);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,998
|
29090744762450577018403806894552968448
| 15
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the authentication schemes required/validated for the
 * scope list given in the "scope" URL parameter, for the current session.
 * 400 when "scope" is missing, 404 when not found, 500 on error,
 * otherwise 200 with the "scheme" object.
 */
int callback_glewlwyd_user_get_schemes_from_scopes (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  char * session_uid = get_session_id(config, request);
  const char * scope_list = u_map_get(request->map_url, "scope");

  if (scope_list == NULL) {
    response->status = 400;
  } else {
    json_t * j_scheme_list = get_validated_auth_scheme_list_from_scope_list(config, scope_list, session_uid);
    if (check_result_value(j_scheme_list, G_OK)) {
      ulfius_set_json_body_response(response, 200, json_object_get(j_scheme_list, "scheme"));
    } else if (check_result_value(j_scheme_list, G_ERROR_NOT_FOUND)) {
      response->status = 404;
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_get_schemes_from_scopes - Error get_validated_auth_scheme_list_from_scope_list");
      response->status = 500;
    }
    json_decref(j_scheme_list);
  }
  o_free(session_uid);
  return U_CALLBACK_CONTINUE;
}
| null | null | 219,999
|
240674310111502399495138507514486567399
| 23
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: disables the API key identified by the "key_hash" URL
 * parameter. 500 on error, otherwise logs which session user disabled it.
 */
int callback_glewlwyd_delete_api_key (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;

  if (disable_api_key(config, u_map_get(request->map_url, "key_hash")) == G_OK) {
    y_log_message(Y_LOG_LEVEL_INFO, "Event - API key disabled by user '%s'", json_string_value(json_object_get((json_t *)response->shared_data, "username")));
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_api_key - Error disable_api_key");
    response->status = 500;
  }
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,000
|
152339618925675466691370544538516383808
| 11
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: deletes the user middleware module named in the "name" URL
 * parameter. 404 when the module does not exist, 500 on lookup or
 * deletion error, otherwise logs the removal event.
 */
int callback_glewlwyd_delete_user_middleware_module (const struct _u_request * request, struct _u_response * response, void * user_middleware_data) {
  struct config_elements * config = (struct config_elements *)user_middleware_data;
  const char * module_name = u_map_get(request->map_url, "name");
  json_t * j_module = get_user_middleware_module(config, module_name);

  if (check_result_value(j_module, G_OK)) {
    if (delete_user_middleware_module(config, module_name) == G_OK) {
      y_log_message(Y_LOG_LEVEL_INFO, "Event - User backend module '%s' removed", module_name);
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_user_middleware_module - Error delete_user_middleware_module");
      response->status = 500;
    }
  } else if (check_result_value(j_module, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_user_middleware_module - Error get_user_middleware_module");
    response->status = 500;
  }
  json_decref(j_module);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,001
|
50147295785804808889549020891028581687
| 21
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the list of configured user auth scheme modules.
 * 200 with the "module" array, 500 on error.
 */
int callback_glewlwyd_get_user_auth_scheme_module_list (const struct _u_request * request, struct _u_response * response, void * user_auth_scheme_data) {
  UNUSED(request);
  struct config_elements * config = (struct config_elements *)user_auth_scheme_data;
  json_t * j_list = get_user_auth_scheme_module_list(config);

  if (!check_result_value(j_list, G_OK)) {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_user_auth_scheme_module_list - Error get_user_auth_scheme_module_list");
    response->status = 500;
  } else {
    ulfius_set_json_body_response(response, 200, json_object_get(j_list, "module"));
  }
  json_decref(j_list);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,002
|
91523132211419165236309447551551712203
| 15
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: registers (or updates registration of) an authentication
 * scheme for the currently authenticated user.
 * Expects a JSON body with "username", "scheme_type", "scheme_name" and
 * an optional "value" payload; the username must match (case-insensitive)
 * the session user stored in response->shared_data — no impersonation.
 * 200 with the scheme's "register" object on success, 400 on parameter
 * error, 401 unauthorized, 404 unknown scheme, 500 internal error.
 */
int callback_glewlwyd_user_auth_register (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_param = ulfius_get_json_body_request(request, NULL), * j_result = NULL;
  if (j_param != NULL) {
    if (json_object_get(j_param, "username") != NULL && json_is_string(json_object_get(j_param, "username")) && json_string_length(json_object_get(j_param, "username"))) {
      /* Only the session's own user may register a scheme for itself */
      if (0 == o_strcasecmp(json_string_value(json_object_get((json_t *)response->shared_data, "username")), json_string_value(json_object_get(j_param, "username")))) {
        if (json_object_get(j_param, "scheme_type") != NULL && json_is_string(json_object_get(j_param, "scheme_type")) && json_string_length(json_object_get(j_param, "scheme_type")) && json_object_get(j_param, "scheme_name") != NULL && json_is_string(json_object_get(j_param, "scheme_name")) && json_string_length(json_object_get(j_param, "scheme_name"))) {
          j_result = auth_register_user_scheme(config, json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")), json_string_value(json_object_get(j_param, "username")), 0, json_object_get(j_param, "value"), request);
          if (check_result_value(j_result, G_ERROR_PARAM)) {
            /* Forward the scheme's error details when it provided any */
            if (json_object_get(j_result, "register") != NULL) {
              ulfius_set_json_body_response(response, 400, json_object_get(j_result, "register"));
            } else {
              ulfius_set_string_body_response(response, 400, "bad scheme response");
            }
          } else if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
            response->status = 404;
          } else if (check_result_value(j_result, G_ERROR_UNAUTHORIZED)) {
            response->status = 401;
          } else if (check_result_value(j_result, G_OK)) {
            /* Body is only set when the scheme returned registration data */
            if (json_object_get(j_result, "register") != NULL) {
              ulfius_set_json_body_response(response, 200, json_object_get(j_result, "register"));
            }
            y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' registered scheme '%s/%s'", json_string_value(json_object_get(j_param, "username")), json_string_value(json_object_get(j_param, "scheme_type")), json_string_value(json_object_get(j_param, "scheme_name")));
          } else {
            y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_auth_register - Error auth_check_user_scheme");
            response->status = 500;
          }
          json_decref(j_result);
        } else {
          ulfius_set_string_body_response(response, 400, "scheme is mandatory");
        }
      } else {
        ulfius_set_string_body_response(response, 400, "username invalid");
      }
    } else {
      ulfius_set_string_body_response(response, 400, "username is mandatory");
    }
  } else {
    ulfius_set_string_body_response(response, 400, "Input parameters must be in JSON format");
  }
  json_decref(j_param);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,003
|
35349864808407577861370027898489708536
| 44
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the list of available module types.
 * 200 with the "module" array, 500 on error.
 */
int callback_glewlwyd_get_module_type_list (const struct _u_request * request, struct _u_response * response, void * user_data) {
  UNUSED(request);
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_type_list = get_module_type_list(config);

  if (!check_result_value(j_type_list, G_OK)) {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_module_type_list - Error get_module_type_list");
    response->status = 500;
  } else {
    ulfius_set_json_body_response(response, 200, json_object_get(j_type_list, "module"));
  }
  json_decref(j_type_list);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,004
|
159427887499228592529193381631002207000
| 15
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the client identified by the "client_id" URL
 * parameter, optionally restricted to a backend "source".
 * 200 with the "client" object, 404 when unknown, 500 on error.
 */
int callback_glewlwyd_get_client (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_result = get_client(config, u_map_get(request->map_url, "client_id"), u_map_get(request->map_url, "source"));

  if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else if (check_result_value(j_result, G_OK)) {
    ulfius_set_json_body_response(response, 200, json_object_get(j_result, "client"));
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_client - Error j_client");
    response->status = 500;
  }
  json_decref(j_result);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,005
|
22131840475938275448881440623001268483
| 16
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: deletes the user identified by the "username" URL parameter.
 * The deletion targets the backend source the user was actually found in
 * (taken from the lookup result, not the URL). 404 when the user does not
 * exist, 500 on lookup or deletion error.
 */
int callback_glewlwyd_delete_user (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  const char * username = u_map_get(request->map_url, "username");
  json_t * j_user = get_user(config, username, u_map_get(request->map_url, "source"));

  if (check_result_value(j_user, G_OK)) {
    if (delete_user(config, username, json_string_value(json_object_get(json_object_get(j_user, "user"), "source"))) == G_OK) {
      y_log_message(Y_LOG_LEVEL_INFO, "Event - User '%s' removed", username);
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_user - Error delete_user");
      response->status = 500;
    }
  } else if (check_result_value(j_user, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_user - Error get_user");
    response->status = 500;
  }
  json_decref(j_user);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,006
|
12695331463274626007087002458721701683
| 21
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
int callback_glewlwyd_user_get_client_grant_list (const struct _u_request * request, struct _u_response * response, void * user_data) {
struct config_elements * config = (struct config_elements *)user_data;
json_t * j_client_grant_list;
size_t offset = 0, limit = GLEWLWYD_DEFAULT_LIMIT_SIZE;
long int l_converted = 0;
char * endptr = NULL;
if (u_map_get(request->map_url, "offset") != NULL) {
l_converted = strtol(u_map_get(request->map_url, "offset"), &endptr, 10);
if (!(*endptr) && l_converted > 0) {
offset = (size_t)l_converted;
}
}
if (u_map_get(request->map_url, "limit") != NULL) {
l_converted = strtol(u_map_get(request->map_url, "limit"), &endptr, 10);
if (!(*endptr) && l_converted > 0) {
limit = (size_t)l_converted;
}
}
j_client_grant_list = get_client_grant_list(config, json_string_value(json_object_get((json_t *)response->shared_data, "username")), offset, limit);
if (check_result_value(j_client_grant_list, G_OK)) {
ulfius_set_json_body_response(response, 200, json_object_get(j_client_grant_list, "client_grant"));
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_get_session_list - Error get_user_session_list");
response->status = 500;
}
json_decref(j_client_grant_list);
return U_CALLBACK_CONTINUE;
}
| null | null | 220,007
|
59568825142455739375783287636187303551
| 29
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: enable, disable or reset the user auth scheme module named in
 * the "name" URL parameter, according to the "action" URL parameter:
 * - "enable":  start the module
 * - "disable": stop the module
 * - "reset":   stop then restart the module
 * 400 on unknown action or module parameter error (forwarding the
 * module's "error" details when available), 404 when the module does not
 * exist, 500 on internal error.
 */
int callback_glewlwyd_manage_user_auth_scheme_module (const struct _u_request * request, struct _u_response * response, void * user_auth_scheme_data) {
  struct config_elements * config = (struct config_elements *)user_auth_scheme_data;
  json_t * j_search_module, * j_result, * j_result2;
  j_search_module = get_user_auth_scheme_module(config, u_map_get(request->map_url, "name"));
  if (check_result_value(j_search_module, G_OK)) {
    if (0 == o_strcmp("enable", u_map_get(request->map_url, "action"))) {
      j_result = manage_user_auth_scheme_module(config, u_map_get(request->map_url, "name"), GLEWLWYD_MODULE_ACTION_START);
      if (check_result_value(j_result, G_ERROR_PARAM)) {
        if (json_object_get(j_result, "error") != NULL) {
          ulfius_set_json_body_response(response, 400, json_object_get(j_result, "error"));
        } else {
          response->status = 400;
        }
      } else if (!check_result_value(j_result, G_OK)) {
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_user_auth_scheme_module - Error manage_user_auth_scheme_module enable");
        response->status = 500;
      }
      json_decref(j_result);
    } else if (0 == o_strcmp("disable", u_map_get(request->map_url, "action"))) {
      j_result = manage_user_auth_scheme_module(config, u_map_get(request->map_url, "name"), GLEWLWYD_MODULE_ACTION_STOP);
      if (check_result_value(j_result, G_ERROR_PARAM)) {
        if (json_object_get(j_result, "error") != NULL) {
          ulfius_set_json_body_response(response, 400, json_object_get(j_result, "error"));
        } else {
          response->status = 400;
        }
      } else if (!check_result_value(j_result, G_OK)) {
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_user_auth_scheme_module - Error manage_user_auth_scheme_module disable");
        response->status = 500;
      }
      json_decref(j_result);
    } else if (0 == o_strcmp("reset", u_map_get(request->map_url, "action"))) {
      /* reset = stop, then restart only if the stop succeeded */
      j_result = manage_user_auth_scheme_module(config, u_map_get(request->map_url, "name"), GLEWLWYD_MODULE_ACTION_STOP);
      if (check_result_value(j_result, G_ERROR_PARAM)) {
        if (json_object_get(j_result, "error") != NULL) {
          ulfius_set_json_body_response(response, 400, json_object_get(j_result, "error"));
        } else {
          response->status = 400;
        }
      } else if (!check_result_value(j_result, G_OK)) {
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_user_auth_scheme_module - Error manage_user_auth_scheme_module reset (1)");
        response->status = 500;
      } else {
        j_result2 = manage_user_auth_scheme_module(config, u_map_get(request->map_url, "name"), GLEWLWYD_MODULE_ACTION_START);
        if (check_result_value(j_result2, G_ERROR_PARAM)) {
          if (json_object_get(j_result2, "error") != NULL) {
            ulfius_set_json_body_response(response, 400, json_object_get(j_result2, "error"));
          } else {
            response->status = 400;
          }
        } else if (!check_result_value(j_result2, G_OK)) {
          /* BUG FIX: previously tested j_result here, which is known to be
           * G_OK in this branch, so a failed restart was silently ignored
           * (no 500, no log). Test the restart result j_result2 instead. */
          y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_user_auth_scheme_module - Error manage_user_auth_scheme_module reset (2)");
          response->status = 500;
        }
        json_decref(j_result2);
      }
      json_decref(j_result);
    } else {
      response->status = 400;
    }
  } else if (check_result_value(j_search_module, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_user_auth_scheme_module - Error get_user_auth_scheme_module");
    response->status = 500;
  }
  json_decref(j_search_module);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,008
|
252416580612082755427483357680287094142
| 70
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the profile/session list of the current session user
 * and refreshes the session cookie's expiration.
 * 400 when a "username" URL parameter is present (no impersonation on
 * this endpoint), 401 when there is no valid session, 500 on error,
 * otherwise 200 with the "session" object and a re-issued session cookie.
 */
int callback_glewlwyd_user_get_profile (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_session;
  /* expires holds the formatted cookie expiration date; 129 bytes for the
   * 128-char strftime limit below plus the NUL terminator */
  char * session_uid, expires[129];
  time_t now;
  struct tm ts;
  time(&now);
  /* Push the cookie expiration forward from now */
  now += GLEWLWYD_DEFAULT_SESSION_EXPIRATION_COOKIE;
  gmtime_r(&now, &ts);
  strftime(expires, 128, "%a, %d %b %Y %T %Z", &ts);
  if (!o_strlen(u_map_get(request->map_url, "username"))) {
    session_uid = get_session_id(config, request);
    if (session_uid != NULL && o_strlen(session_uid)) {
      j_session = get_users_for_session(config, session_uid);
      if (check_result_value(j_session, G_OK)) {
        ulfius_set_json_body_response(response, 200, json_object_get(j_session, "session"));
        /* Re-issue the session cookie with the refreshed expiration */
        ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
      } else if (check_result_value(j_session, G_ERROR_NOT_FOUND)) {
        response->status = 401;
      } else {
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_get_session - Error get_current_user_for_session");
        response->status = 500;
      }
      json_decref(j_session);
    } else {
      response->status = 401;
    }
    o_free(session_uid);
  } else {
    // Can't impersonate this endpoint
    response->status = 400;
  }
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,009
|
323966465577295822778227022249700929020
| 36
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: returns the user auth scheme module identified by the "name"
 * URL parameter. 200 with the "module" object, 404 when unknown, 500 on
 * error.
 */
int callback_glewlwyd_get_user_auth_scheme_module (const struct _u_request * request, struct _u_response * response, void * user_auth_scheme_data) {
  struct config_elements * config = (struct config_elements *)user_auth_scheme_data;
  json_t * j_result = get_user_auth_scheme_module(config, u_map_get(request->map_url, "name"));

  if (check_result_value(j_result, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else if (check_result_value(j_result, G_OK)) {
    ulfius_set_json_body_response(response, 200, json_object_get(j_result, "module"));
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_user_auth_scheme_module - Error get_user_auth_scheme_module");
    response->status = 500;
  }
  json_decref(j_result);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,010
|
43596409082378717232058718010512898406
| 16
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Callback: updates the user middleware module named in the "name" URL
 * parameter from the JSON request body.
 * The body's "enabled" flag is stripped before validation — enable/
 * disable state cannot be changed through this endpoint.
 * 400 on missing/invalid body or validation error (forwarding the
 * validator's "error" details when available), 404 when the module does
 * not exist, 500 on internal error.
 */
int callback_glewlwyd_set_user_middleware_module (const struct _u_request * request, struct _u_response * response, void * user_middleware_data) {
  struct config_elements * config = (struct config_elements *)user_middleware_data;
  json_t * j_module, * j_module_valid, * j_search_module;
  j_search_module = get_user_middleware_module(config, u_map_get(request->map_url, "name"));
  if (check_result_value(j_search_module, G_OK)) {
    /* NULL when the request body is not parseable JSON */
    j_module = ulfius_get_json_body_request(request, NULL);
    if (j_module != NULL) {
      /* "enabled" is managed elsewhere; remove it before validating */
      json_object_del(j_module, "enabled");
      j_module_valid = is_user_middleware_module_valid(config, j_module, 0);
      if (check_result_value(j_module_valid, G_OK)) {
        if (set_user_middleware_module(config, u_map_get(request->map_url, "name"), j_module) != G_OK) {
          y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_user_middleware_module - Error set_user_middleware_module");
          response->status = 500;
        } else {
          y_log_message(Y_LOG_LEVEL_INFO, "Event - User backend module '%s' updated", u_map_get(request->map_url, "name"));
        }
      } else if (check_result_value(j_module_valid, G_ERROR_PARAM)) {
        if (json_object_get(j_module_valid, "error") != NULL) {
          ulfius_set_json_body_response(response, 400, json_object_get(j_module_valid, "error"));
        } else {
          response->status = 400;
        }
      } else if (!check_result_value(j_module_valid, G_OK)) {
        /* defensive catch-all: any other validator result is an internal error */
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_user_middleware_module - Error is_user_middleware_module_valid");
        response->status = 500;
      }
      json_decref(j_module_valid);
    } else {
      response->status = 400;
    }
    json_decref(j_module);
  } else if (check_result_value(j_search_module, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_user_middleware_module - Error get_user_middleware_module");
    response->status = 500;
  }
  json_decref(j_search_module);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,011
|
216776040311474698522153176355253445048
| 41
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Generic CORS preflight handler: advertises the allowed methods and headers
 * and lets clients cache the preflight result for 30 minutes.
 * Always completes the request (no further callback runs).
 */
int callback_glewlwyd_options (const struct _u_request * request, struct _u_response * response, void * user_data) {
  static const char allow_methods[] = "GET, POST, PUT, DELETE, OPTIONS";
  static const char allow_headers[] = "Origin, X-Requested-With, Content-Type, Accept, Bearer, Authorization";
  static const char max_age_seconds[] = "1800";

  UNUSED(request);
  UNUSED(user_data);
  ulfius_add_header_to_response(response, "Access-Control-Allow-Methods", allow_methods);
  ulfius_add_header_to_response(response, "Access-Control-Allow-Headers", allow_headers);
  ulfius_add_header_to_response(response, "Access-Control-Max-Age", max_age_seconds);
  return U_CALLBACK_COMPLETE;
}
| null | null | 220,012
|
14017123269517116578682436534721606360
| 8
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Lists users, optionally filtered by the "pattern" and "source" query
 * parameters, paginated with "offset" and "limit".
 * Non-numeric or non-positive values silently fall back to the defaults
 * (offset 0, limit GLEWLWYD_DEFAULT_LIMIT_SIZE).
 */
int callback_glewlwyd_get_user_list (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  const char * offset_param = u_map_get(request->map_url, "offset"),
             * limit_param = u_map_get(request->map_url, "limit");
  size_t offset = 0, limit = GLEWLWYD_DEFAULT_LIMIT_SIZE;
  long int parsed = 0;
  char * endptr = NULL;
  json_t * j_user_list;

  if (offset_param != NULL) {
    parsed = strtol(offset_param, &endptr, 10);
    // Accept only fully-numeric, strictly positive values
    if (!(*endptr) && parsed > 0) {
      offset = (size_t)parsed;
    }
  }
  if (limit_param != NULL) {
    parsed = strtol(limit_param, &endptr, 10);
    if (!(*endptr) && parsed > 0) {
      limit = (size_t)parsed;
    }
  }
  j_user_list = get_user_list(config, u_map_get(request->map_url, "pattern"), offset, limit, u_map_get(request->map_url, "source"));
  if (check_result_value(j_user_list, G_OK)) {
    ulfius_set_json_body_response(response, 200, json_object_get(j_user_list, "user"));
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_user_list - Error get_user_list");
    response->status = 500;
  }
  json_decref(j_user_list);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,013
|
2123312216812427358131300052520455829
| 29
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Closes the current session, either for a single user ("username" URL
 * parameter given) or for all users sharing the session cookie.
 *
 * The session cookie is cleared on the client only when the last (or only)
 * user of the session is removed; otherwise the cookie is refreshed with a
 * new expiration date.
 *
 * Responses: 200 on success, 401 without a session cookie, 404 if the
 * session is unknown, 500 on internal error.
 */
int callback_glewlwyd_user_delete_session (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  json_t * j_session, * j_cur_session;
  char * session_uid = get_session_id(config, request), expires[129];
  size_t index;
  time_t now;
  struct tm ts;

  // Precompute the cookie expiration date (RFC 1123 format, UTC), used both
  // to refresh and to invalidate the cookie below.
  time(&now);
  now += GLEWLWYD_DEFAULT_SESSION_EXPIRATION_COOKIE;
  gmtime_r(&now, &ts);
  strftime(expires, 128, "%a, %d %b %Y %T %Z", &ts);
  if (session_uid != NULL && o_strlen(session_uid)) {
    j_session = get_users_for_session(config, session_uid);
    if (check_result_value(j_session, G_ERROR_NOT_FOUND)) {
      response->status = 404;
    } else if (!check_result_value(j_session, G_OK)) {
      y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_delete_session - Error get_current_user_for_session");
      response->status = 500;
    } else {
      if (u_map_get(request->map_url, "username") != NULL) {
        // Targeted logout: remove only the matching user from the session
        json_array_foreach(json_object_get(j_session, "session"), index, j_cur_session) {
          if (0 == o_strcasecmp(u_map_get(request->map_url, "username"), json_string_value(json_object_get(j_cur_session, "username")))) {
            if (user_session_delete(config, session_uid, u_map_get(request->map_url, "username")) != G_OK) {
              y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_delete_session - Error user_session_delete");
              response->status = 500;
            }
          }
        }
        if (json_array_size(json_object_get(j_session, "session")) == 1) {
          // Delete session cookie on the client browser
          ulfius_add_cookie_to_response(response, config->session_key, "", expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
        } else {
          // Other users remain attached: keep the cookie, extend its lifetime
          ulfius_add_cookie_to_response(response, config->session_key, session_uid, expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
        }
      } else {
        // Full logout: drop every user attached to this session
        if (user_session_delete(config, session_uid, NULL) != G_OK) {
          y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_user_delete_session - Error user_session_delete");
          response->status = 500;
        }
        // Delete session cookie on the client browser
        ulfius_add_cookie_to_response(response, config->session_key, "", expires, 0, config->cookie_domain, "/", config->cookie_secure, 0);
      }
    }
    json_decref(j_session);
  } else {
    response->status = 401;
  }
  o_free(session_uid);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,014
|
145933048978816320600982948090882647555
| 52
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Runs a lifecycle action on a plugin module: "enable" (start), "disable"
 * (stop) or "reset" (stop then start).
 *
 * Responses: 200 on success, 400 on unknown action or parameter error
 * (detailed error body when provided), 404 if the module does not exist,
 * 500 on internal error.
 */
int callback_glewlwyd_manage_plugin_module (const struct _u_request * request, struct _u_response * response, void * plugin_data) {
  struct config_elements * config = (struct config_elements *)plugin_data;
  json_t * j_search_module, * j_result, * j_result2;
  j_search_module = get_plugin_module(config, u_map_get(request->map_url, "name"));
  if (check_result_value(j_search_module, G_OK)) {
    if (0 == o_strcmp("enable", u_map_get(request->map_url, "action"))) {
      j_result = manage_plugin_module(config, u_map_get(request->map_url, "name"), GLEWLWYD_MODULE_ACTION_START);
      if (check_result_value(j_result, G_ERROR_PARAM)) {
        if (json_object_get(j_result, "error") != NULL) {
          ulfius_set_json_body_response(response, 400, json_object_get(j_result, "error"));
        } else {
          response->status = 400;
        }
      } else if (!check_result_value(j_result, G_OK)) {
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_plugin_module - Error manage_plugin_module enable");
        response->status = 500;
      }
      json_decref(j_result);
    } else if (0 == o_strcmp("disable", u_map_get(request->map_url, "action"))) {
      j_result = manage_plugin_module(config, u_map_get(request->map_url, "name"), GLEWLWYD_MODULE_ACTION_STOP);
      if (check_result_value(j_result, G_ERROR_PARAM)) {
        if (json_object_get(j_result, "error") != NULL) {
          ulfius_set_json_body_response(response, 400, json_object_get(j_result, "error"));
        } else {
          response->status = 400;
        }
      } else if (!check_result_value(j_result, G_OK)) {
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_plugin_module - Error manage_plugin_module disable");
        response->status = 500;
      }
      json_decref(j_result);
    } else if (0 == o_strcmp("reset", u_map_get(request->map_url, "action"))) {
      // reset = stop, then start only if the stop succeeded
      j_result = manage_plugin_module(config, u_map_get(request->map_url, "name"), GLEWLWYD_MODULE_ACTION_STOP);
      if (check_result_value(j_result, G_ERROR_PARAM)) {
        if (json_object_get(j_result, "error") != NULL) {
          ulfius_set_json_body_response(response, 400, json_object_get(j_result, "error"));
        } else {
          response->status = 400;
        }
      } else if (!check_result_value(j_result, G_OK)) {
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_plugin_module - Error manage_plugin_module reset (1)");
        response->status = 500;
      } else {
        j_result2 = manage_plugin_module(config, u_map_get(request->map_url, "name"), GLEWLWYD_MODULE_ACTION_START);
        if (check_result_value(j_result2, G_ERROR_PARAM)) {
          if (json_object_get(j_result2, "error") != NULL) {
            ulfius_set_json_body_response(response, 400, json_object_get(j_result2, "error"));
          } else {
            response->status = 400;
          }
        } else if (!check_result_value(j_result2, G_OK)) {
          // Fixed: this is the restart phase, log "(2)" instead of the
          // duplicated "(1)" so the two failure points are distinguishable.
          y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_plugin_module - Error manage_plugin_module reset (2)");
          response->status = 500;
        }
        json_decref(j_result2);
      }
      json_decref(j_result);
    } else {
      response->status = 400;
    }
  } else if (check_result_value(j_search_module, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_manage_plugin_module - Error get_plugin_module");
    response->status = 500;
  }
  json_decref(j_search_module);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,015
|
64919575789391125064178405767061830021
| 70
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
int callback_glewlwyd_delete_plugin_module (const struct _u_request * request, struct _u_response * response, void * plugin_data) {
struct config_elements * config = (struct config_elements *)plugin_data;
json_t * j_search_module;
j_search_module = get_plugin_module(config, u_map_get(request->map_url, "name"));
if (check_result_value(j_search_module, G_OK)) {
if (delete_plugin_module(config, u_map_get(request->map_url, "name")) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_plugin_module - Error delete_plugin_module");
response->status = 500;
} else {
y_log_message(Y_LOG_LEVEL_INFO, "Event - Plugin module '%s' removed", u_map_get(request->map_url, "name"));
}
} else if (check_result_value(j_search_module, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_plugin_module - Error get_plugin_module");
response->status = 500;
}
json_decref(j_search_module);
return U_CALLBACK_CONTINUE;
}
| null | null | 220,016
|
281284957231926475041074909570229348411
| 21
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
/**
 * Lists API keys, optionally filtered by "pattern", paginated with "offset"
 * and "limit".
 * Invalid numeric values fall back to the defaults; note that unlike the
 * other list endpoints, an explicit limit of 0 is accepted here.
 */
int callback_glewlwyd_get_api_key_list (const struct _u_request * request, struct _u_response * response, void * user_data) {
  struct config_elements * config = (struct config_elements *)user_data;
  const char * offset_param = u_map_get(request->map_url, "offset"),
             * limit_param = u_map_get(request->map_url, "limit");
  size_t offset = 0, limit = GLEWLWYD_DEFAULT_LIMIT_SIZE;
  long int parsed = 0;
  char * endptr = NULL;
  json_t * j_api_key_list;

  if (offset_param != NULL) {
    parsed = strtol(offset_param, &endptr, 10);
    if (!(*endptr) && parsed > 0) {
      offset = (size_t)parsed;
    }
  }
  if (limit_param != NULL) {
    parsed = strtol(limit_param, &endptr, 10);
    if (!(*endptr) && parsed >= 0) {
      limit = (size_t)parsed;
    }
  }
  j_api_key_list = get_api_key_list(config, u_map_get(request->map_url, "pattern"), offset, limit);
  if (check_result_value(j_api_key_list, G_OK)) {
    ulfius_set_json_body_response(response, 200, json_object_get(j_api_key_list, "api_key"));
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_api_key_list - Error get_api_key_list");
    response->status = 500;
  }
  json_decref(j_api_key_list);
  return U_CALLBACK_CONTINUE;
}
| null | null | 220,017
|
334459885034372066632931635412266549891
| 29
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
int callback_glewlwyd_set_scope (const struct _u_request * request, struct _u_response * response, void * user_data) {
struct config_elements * config = (struct config_elements *)user_data;
json_t * j_scope, * j_scope_valid, * j_search_scope;
j_search_scope = get_scope(config, u_map_get(request->map_url, "scope"));
if (check_result_value(j_search_scope, G_OK)) {
j_scope = ulfius_get_json_body_request(request, NULL);
if (j_scope != NULL) {
j_scope_valid = is_scope_valid(config, j_scope, 0);
if (check_result_value(j_scope_valid, G_OK)) {
if (set_scope(config, u_map_get(request->map_url, "scope"), j_scope) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_scope - Error set_scope");
response->status = 500;
} else {
y_log_message(Y_LOG_LEVEL_INFO, "Event - Scope '%s' updated", u_map_get(request->map_url, "scope"));
}
} else if (check_result_value(j_scope_valid, G_ERROR_PARAM)) {
ulfius_set_json_body_response(response, 400, json_object_get(j_scope_valid, "error"));
} else if (!check_result_value(j_scope_valid, G_OK)) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_scope - Error is_scope_valid");
response->status = 500;
}
json_decref(j_scope_valid);
} else {
response->status = 400;
}
json_decref(j_scope);
} else if (check_result_value(j_search_scope, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_scope - Error get_scope");
response->status = 500;
}
json_decref(j_search_scope);
return U_CALLBACK_CONTINUE;
}
| null | null | 220,018
|
239487662588005483077853641815716212245
| 36
|
Fix update session when auth fail
|
other
|
glewlwyd
|
125281f1c0d4b6a8b49f7e55a757205a2ef01fbe
| 0
|
int callback_glewlwyd_delete_client (const struct _u_request * request, struct _u_response * response, void * user_data) {
struct config_elements * config = (struct config_elements *)user_data;
json_t * j_search_client;
j_search_client = get_client(config, u_map_get(request->map_url, "client_id"), u_map_get(request->map_url, "source"));
if (check_result_value(j_search_client, G_OK)) {
if (delete_client(config, u_map_get(request->map_url, "client_id"), json_string_value(json_object_get(json_object_get(j_search_client, "client"), "source"))) != G_OK) {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_client - Error delete_client");
response->status = 500;
} else {
y_log_message(Y_LOG_LEVEL_INFO, "Event - Client '%s' removed", u_map_get(request->map_url, "client_id"));
}
} else if (check_result_value(j_search_client, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_delete_client - Error get_client");
response->status = 500;
}
json_decref(j_search_client);
return U_CALLBACK_CONTINUE;
}
| null | null | 220,019
|
179210584371602386566036131265852630150
| 21
|
Fix update session when auth fail
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Lazily resolves (and caches) the shared SparseTensorsMap resource for this
// kernel. Thread-safe: the whole lookup runs under mu_.
//
// `is_writing` selects whether the node name is used as the default resource
// name when initializing cinfo_. On success, *sparse_tensors_map points at a
// map owned by the resource manager; the reference obtained by
// LookupOrCreate is held in sparse_tensors_map_ until the op is destroyed.
Status GetMap(OpKernelContext* ctx, bool is_writing,
              SparseTensorsMap** sparse_tensors_map) {
  mutex_lock l(mu_);
  if (sparse_tensors_map_) {
    // Already resolved by an earlier call: reuse the cached pointer.
    *sparse_tensors_map = sparse_tensors_map_;
    return Status::OK();
  }
  TF_RETURN_IF_ERROR(cinfo_.Init(ctx->resource_manager(), def(),
                                 is_writing /* use_node_name_as_default */));
  // Creator invoked only when no map with this container/name exists yet.
  CreatorCallback sparse_tensors_map_creator = [this](SparseTensorsMap** c) {
    SparseTensorsMap* map = new SparseTensorsMap(cinfo_.name());
    *c = map;
    return Status::OK();
  };
  TF_RETURN_IF_ERROR(
      cinfo_.resource_manager()->LookupOrCreate<SparseTensorsMap>(
          cinfo_.container(), cinfo_.name(), &sparse_tensors_map_,
          sparse_tensors_map_creator));
  *sparse_tensors_map = sparse_tensors_map_;
  return Status::OK();
}
| null | null | 220,020
|
236229153672534758036755058162740165224
| 26
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Splits a rank-R SparseTensor (R > 1) along its first ("minibatch")
// dimension into N rank-(R-1) SparseTensors and stores each one in the
// shared SparseTensorsMap, emitting an int64 vector of N handles.
// Batch entries with no values still receive a handle, pointing at an empty
// SparseTensor with the same trailing shape.
void Compute(OpKernelContext* context) override {
  const Tensor* input_indices;
  const Tensor* input_values;
  const Tensor* input_shape;
  SparseTensorsMap* map;
  OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices));
  OP_REQUIRES_OK(context, context->input("sparse_values", &input_values));
  OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape));
  OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map));
  // Structural validation of the COO inputs.
  OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
              errors::InvalidArgument(
                  "Input indices should be a matrix but received shape ",
                  input_indices->shape().DebugString()));
  OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
              errors::InvalidArgument(
                  "Input values should be a vector but received shape ",
                  input_values->shape().DebugString()));
  OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
              errors::InvalidArgument(
                  "Input shape should be a vector but received shape ",
                  input_shape->shape().DebugString()));
  OP_REQUIRES(
      context,
      input_values->shape().dim_size(0) == input_indices->shape().dim_size(0),
      errors::InvalidArgument(
          "Number of values must match first dimension of indices. ", "Got ",
          input_values->shape().dim_size(0),
          " values, indices shape: ", input_indices->shape().DebugString()));
  OP_REQUIRES(
      context,
      input_shape->shape().dim_size(0) == input_indices->shape().dim_size(1),
      errors::InvalidArgument(
          "Number of dimensions must match second dimension of indices. ",
          "Got ", input_shape->shape().dim_size(0),
          " dimensions, indices shape: ",
          input_indices->shape().DebugString()));
  int rank = input_shape->NumElements();
  OP_REQUIRES(
      context, rank > 1,
      errors::InvalidArgument(
          "Rank of input SparseTensor should be > 1, but saw rank: ", rank));
  auto input_shape_vec = input_shape->vec<int64_t>();
  // BuildTensorShape validates the dense shape (rejects negative dims and
  // overflowing element counts) instead of constructing it unchecked.
  TensorShape tensor_input_shape;
  OP_REQUIRES_OK(context, TensorShape::BuildTensorShape(input_shape_vec,
                                                        &tensor_input_shape));
  gtl::InlinedVector<int64_t, 8> std_order(rank);
  std::iota(std_order.begin(), std_order.end(), 0);
  SparseTensor input_st;
  OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
                                               tensor_input_shape, std_order,
                                               &input_st));
  const int64_t N = input_shape_vec(0);
  Tensor sparse_handles(DT_INT64, TensorShape({N}));
  auto sparse_handles_t = sparse_handles.vec<int64_t>();
  OP_REQUIRES_OK(context, input_st.IndicesValid());
  // We can generate the output shape proto string now, for all
  // minibatch entries.
  TensorShape output_shape;
  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
                              input_shape_vec.data() + 1,
                              input_shape->NumElements() - 1, &output_shape));
  // Get groups by minibatch dimension
  std::unordered_set<int64_t> visited;
  sparse::GroupIterable minibatch = input_st.group({0});
  for (const auto& subset : minibatch) {
    const int64_t b = subset.group()[0];
    visited.insert(b);
    // The first index column is the batch id; reject out-of-range entries.
    OP_REQUIRES(
        context, b > -1 && b < N,
        errors::InvalidArgument(
            "Received unexpected column 0 value in input SparseTensor: ", b,
            " < 0 or >= N (= ", N, ")"));
    const auto indices = subset.indices();
    const auto values = subset.values<T>();
    const int64_t num_entries = values.size();
    // Strip the leading batch dimension from each index row.
    Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1});
    Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries});
    auto output_indices_t = output_indices.matrix<int64_t>();
    auto output_values_t = output_values.vec<T>();
    for (int i = 0; i < num_entries; ++i) {
      for (int d = 1; d < rank; ++d) {
        output_indices_t(i, d - 1) = indices(i, d);
      }
      output_values_t(i) = values(i);
    }
    SparseTensor st_i;
    OP_REQUIRES_OK(context,
                   SparseTensor::Create(output_indices, output_values,
                                        output_shape, &st_i));
    int64_t handle;
    OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
    sparse_handles_t(b) = handle;
  }
  // Fill in any gaps; we must provide an empty ST for batch entries
  // the grouper didn't find.
  // (size_t vs int64_t comparison is safe here: N >= 0 after the dense
  // shape was validated above.)
  if (visited.size() < N) {
    Tensor empty_indices(DT_INT64, {0, rank - 1});
    Tensor empty_values(DataTypeToEnum<T>::value, {0});
    SparseTensor empty_st;
    OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
                                                 output_shape, &empty_st));
    for (int64_t b = 0; b < N; ++b) {
      // We skipped this batch entry.
      if (visited.find(b) == visited.end()) {
        int64_t handle;
        OP_REQUIRES_OK(context,
                       map->AddSparseTensor(context, empty_st, &handle));
        sparse_handles_t(b) = handle;
      }
    }
  }
  context->set_output(0, sparse_handles);
}
| null | null | 220,021
|
5056790565297333035030994662161974460
| 132
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
~SparseTensorsMap() override {}
| null | null | 220,022
|
214932371833144790213427783339339808633
| 1
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Forwards to SparseTensorAccessingOp; the shared SparseTensorsMap resource
// is resolved lazily on first use (see GetMap).
explicit TakeManySparseFromTensorsMapOp(OpKernelConstruction* context)
    : SparseTensorAccessingOp(context) {}
| null | null | 220,023
|
160630494393304743485611869069515993012
| 2
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Forwards to SparseTensorAccessingOp; the shared SparseTensorsMap resource
// is resolved lazily on first use (see GetMap).
explicit AddManySparseToTensorsMapOp(OpKernelConstruction* context)
    : SparseTensorAccessingOp(context) {}
| null | null | 220,024
|
188730845065383980924459779393669614612
| 2
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Forwards to SparseTensorAccessingOp; the shared SparseTensorsMap resource
// is resolved lazily on first use (see GetMap).
explicit AddSparseToTensorsMapOp(OpKernelConstruction* context)
    : SparseTensorAccessingOp(context) {}
| null | null | 220,025
|
171942714327714055568215520962116816306
| 2
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Retrieves N SparseTensors from the shared map by handle (consuming the
// handles), prepends a minibatch dimension of size 1 to each, pads all of
// them to a common shape, and concatenates them along dimension 0 into a
// single rank-(R+1) SparseTensor emitted as (indices, values, shape).
void Compute(OpKernelContext* context) override {
  SparseTensorsMap* map = nullptr;
  OP_REQUIRES_OK(context, GetMap(context, false /* is_writing */, &map));
  const Tensor& sparse_handles = context->input(0);
  OP_REQUIRES(context, TensorShapeUtils::IsVector(sparse_handles.shape()),
              errors::InvalidArgument(
                  "sparse_handles should be a vector but received shape ",
                  sparse_handles.shape().DebugString()));
  int64_t N = sparse_handles.shape().dim_size(0);
  OP_REQUIRES(
      context, N > 0,
      errors::InvalidArgument("Must have at least 1 serialized SparseTensor, "
                              "but input matrix has 0 rows"));
  std::vector<Tensor> indices_to_concat;
  std::vector<Tensor> values_to_concat;
  std::vector<TensorShape> shapes_to_concat;
  const auto& sparse_handles_t = sparse_handles.vec<int64_t>();
  // Handles are single-use: retrieval also erases them from the map.
  std::vector<SparseTensor> sparse_tensors;
  OP_REQUIRES_OK(context, map->RetrieveAndClearSparseTensors(
                              context, sparse_handles_t, &sparse_tensors));
  for (int64_t i = 0; i < N; ++i) {
    const SparseTensor& st = sparse_tensors[i];
    const Tensor& output_indices = st.indices();
    const Tensor& output_values = st.values();
    const auto output_shape = st.shape();
    // Validate each retrieved tensor before use — the map may hold tensors
    // stored by a differently-typed producer.
    OP_REQUIRES(context, TensorShapeUtils::IsMatrix(output_indices.shape()),
                errors::InvalidArgument(
                    "Expected sparse_handles[", i,
                    "] to represent an index matrix but received shape ",
                    output_indices.shape().DebugString()));
    OP_REQUIRES(context, TensorShapeUtils::IsVector(output_values.shape()),
                errors::InvalidArgument(
                    "Expected sparse_handles[", i,
                    "] to represent a values vector but received shape ",
                    output_values.shape().DebugString()));
    OP_REQUIRES(
        context, DataTypeToEnum<T>::value == output_values.dtype(),
        errors::InvalidArgument(
            "Requested SparseTensor of type ",
            DataTypeString(DataTypeToEnum<T>::value), " but SparseTensor[", i,
            "].values.dtype() == ", DataTypeString(output_values.dtype())));
    int64_t num_entries = output_indices.dim_size(0);
    OP_REQUIRES(context, num_entries == output_values.dim_size(0),
                errors::InvalidArgument(
                    "Expected row counts of SparseTensor[", i,
                    "].indices and SparseTensor[", i,
                    "].values to match but they do not: ", num_entries,
                    " vs. ", output_values.dim_size(0)));
    int rank = output_indices.dim_size(1);
    OP_REQUIRES(
        context, rank == output_shape.size(),
        errors::InvalidArgument("Expected column counts of SparseTensor[", i,
                                "].indices to match size of SparseTensor[", i,
                                "].shape "
                                "but they do not: ",
                                rank, " vs. ", output_shape.size()));
    // Now we expand each SparseTensors' indices and shape by
    // prefixing a dimension
    Tensor expanded_indices(
        DT_INT64, TensorShape({num_entries, 1 + output_indices.dim_size(1)}));
    Tensor expanded_shape(DT_INT64, TensorShape({1 + rank}));
    const auto& output_indices_t = output_indices.matrix<int64_t>();
    auto expanded_indices_t = expanded_indices.matrix<int64_t>();
    auto expanded_shape_t = expanded_shape.vec<int64_t>();
    // New leading index column is all zeros (each tensor is its own batch
    // entry of size 1); original index columns shift right by one.
    expanded_indices_t.chip<1>(0).setZero();
    Eigen::DSizes<Eigen::DenseIndex, 2> indices_start(0, 1);
    Eigen::DSizes<Eigen::DenseIndex, 2> indices_sizes(num_entries, rank);
    expanded_indices_t.slice(indices_start, indices_sizes) = output_indices_t;
    expanded_shape_t(0) = 1;
    // TODO: copy shape from TensorShape to &expanded_shape_t(1)
    // std::copy_n(&output_shape_t(0), rank, &expanded_shape_t(1));
    // NOTE(review): this inner `i` shadows the outer int64_t loop variable;
    // correct as written, but renaming it (e.g. to `d`) would aid clarity.
    for (int i = 0; i < rank; ++i) {
      expanded_shape_t(i + 1) = output_shape[i];
    }
    TensorShape expanded_tensor_shape(expanded_shape_t);
    indices_to_concat.push_back(std::move(expanded_indices));
    values_to_concat.push_back(output_values);
    shapes_to_concat.push_back(std::move(expanded_tensor_shape));
  }
  int rank = -1;
  for (int i = 0; i < N; ++i) {
    if (rank < 0) rank = shapes_to_concat[i].dims();
    OP_REQUIRES(context, rank == shapes_to_concat[i].dims(),
                errors::InvalidArgument(
                    "Inconsistent rank across SparseTensors: rank prior to "
                    "SparseTensor[",
                    i, "] was: ", rank, " but rank of SparseTensor[", i,
                    "] is: ", shapes_to_concat[i].dims()));
  }
  // SparseTensor::Concat requires consistent shape for all but the
  // primary order dimension (dimension 0 in this case). So we get
  // the maximum value across all the input SparseTensors for each
  // dimension and use that.
  TensorShape preconcat_shape(shapes_to_concat[0]);
  for (int i = 0; i < N; ++i) {
    for (int d = 0; d < rank; ++d) {
      preconcat_shape.set_dim(d, std::max(preconcat_shape.dim_size(d),
                                          shapes_to_concat[i].dim_size(d)));
    }
  }
  // Dimension 0 is the primary dimension.
  gtl::InlinedVector<int64_t, 8> std_order(rank);
  std::iota(std_order.begin(), std_order.end(), 0);
  std::vector<SparseTensor> tensors_to_concat;
  tensors_to_concat.reserve(N);
  for (int i = 0; i < N; ++i) {
    SparseTensor tensor;
    OP_REQUIRES_OK(context,
                   SparseTensor::Create(std::move(indices_to_concat[i]),
                                        std::move(values_to_concat[i]),
                                        preconcat_shape, std_order, &tensor));
    tensors_to_concat.push_back(std::move(tensor));
  }
  auto output = SparseTensor::Concat<T>(tensors_to_concat);
  Tensor final_output_shape(DT_INT64, TensorShape({output.dims()}));
  std::copy_n(output.shape().data(), output.dims(),
              final_output_shape.vec<int64_t>().data());
  context->set_output(0, output.indices());
  context->set_output(1, output.values());
  context->set_output(2, final_output_shape);
}
| null | null | 220,026
|
322028574131742416642442554473046564072
| 141
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
explicit SparseTensorsMap(const string& name) : name_(name), counter_(0) {}
| null | null | 220,027
|
66708310744716716350839613533973368297
| 1
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Stores a single SparseTensor (indices, values, shape inputs) in the shared
// SparseTensorsMap and emits its int64 handle as a scalar output.
void Compute(OpKernelContext* context) override {
  const Tensor* input_indices;
  const Tensor* input_values;
  const Tensor* input_shape;
  SparseTensorsMap* map;
  OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices));
  OP_REQUIRES_OK(context, context->input("sparse_values", &input_values));
  OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape));
  OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map));
  // Structural validation of the COO inputs.
  OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
              errors::InvalidArgument(
                  "Input indices should be a matrix but received shape ",
                  input_indices->shape().DebugString()));
  OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
              errors::InvalidArgument(
                  "Input values should be a vector but received shape ",
                  input_values->shape().DebugString()));
  OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
              errors::InvalidArgument(
                  "Input shape should be a vector but received shape ",
                  input_shape->shape().DebugString()));
  // MakeShape validates the dense shape values (e.g. rejects negatives).
  TensorShape input_shape_object;
  OP_REQUIRES_OK(
      context, TensorShapeUtils::MakeShape(input_shape->vec<int64_t>().data(),
                                           input_shape->NumElements(),
                                           &input_shape_object));
  SparseTensor st;
  OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
                                               input_shape_object, &st));
  int64_t handle;
  OP_REQUIRES_OK(context, map->AddSparseTensor(context, st, &handle));
  Tensor sparse_handle(DT_INT64, TensorShape({}));
  auto sparse_handle_t = sparse_handle.scalar<int64_t>();
  sparse_handle_t() = handle;
  context->set_output(0, sparse_handle);
}
| null | null | 220,028
|
291809024179453160303809543199365119258
| 44
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// The cached map pointer starts null; GetMap fills it on first access.
explicit SparseTensorAccessingOp(OpKernelConstruction* context)
    : OpKernel(context), sparse_tensors_map_(nullptr) {}
| null | null | 220,029
|
216499252012264984045969848061668675225
| 2
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Looks up each handle in `handles`, reconstructs the corresponding
// SparseTensor into *sparse_tensors (same order as `handles`), and erases
// the entry from the map — handles are single-use.
//
// Fails with InvalidArgument on the first unknown handle; entries consumed
// before the failure have already been removed from the map.
Status RetrieveAndClearSparseTensors(
    OpKernelContext* ctx, const TTypes<int64_t>::ConstVec& handles,
    std::vector<SparseTensor>* sparse_tensors) {
  sparse_tensors->clear();
  sparse_tensors->reserve(handles.size());
  {
    mutex_lock l(mu_);  // guards sp_tensors_ for the whole batch
    for (size_t i = 0; i < handles.size(); ++i) {
      const int64_t handle = handles(i);
      auto sp_iter = sp_tensors_.find(handle);
      if (sp_iter == sp_tensors_.end()) {
        return errors::InvalidArgument(
            "Unable to find SparseTensor: ", handle, " in map: ", name_);
      }
      const Tensor* ix = &sp_iter->second.indices;
      const Tensor* values = &sp_iter->second.values;
      const auto& shape = sp_iter->second.shape;
      SparseTensor tensor;
      TF_RETURN_IF_ERROR(SparseTensor::Create(*ix, *values, shape, &tensor));
      sparse_tensors->push_back(std::move(tensor));
      sp_tensors_.erase(sp_iter);
    }
  }
  return Status::OK();
}
| null | null | 220,030
|
138463372034115928244842389959589778680
| 26
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
string DebugString() const override { return "A SparseTensorsMap"; }
| null | null | 220,031
|
204704520500878489134248594586064968222
| 1
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Stores a copy of `sp` in the map under a fresh integer handle so it can be
// retrieved (and consumed) later by RetrieveAndClearSparseTensors.
//
// The indices and values tensors are staged through context-allocated
// tensors; the dense shape is kept as a plain int64 vector.
Status AddSparseTensor(OpKernelContext* ctx, const SparseTensor& sp,
                       int64_t* handle) {
  Tensor ix;
  TF_RETURN_IF_ERROR(
      ctx->allocate_temp(sp.indices().dtype(), sp.indices().shape(), &ix));
  ix = sp.indices();
  Tensor values;
  // Fixed: allocate the staging tensor with the *values* dtype/shape — the
  // original passed sp.indices() here (copy-paste slip). Harmless in effect
  // since the assignment below rebinds the tensor, but misleading and it
  // allocated a buffer of the wrong type/size.
  TF_RETURN_IF_ERROR(ctx->allocate_temp(sp.values().dtype(),
                                        sp.values().shape(), &values));
  values = sp.values();
  {
    mutex_lock l(mu_);
    int64_t unique_st_handle = counter_++;  // increment is guarded on purpose
    sp_tensors_[unique_st_handle] = PersistentSparseTensor{
        ix, values,
        gtl::InlinedVector<int64_t, 8>(sp.shape().begin(), sp.shape().end())};
    *handle = unique_st_handle;
  }
  return Status::OK();
}
| null | null | 220,032
|
93242493049230499168782414580364789320
| 21
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
tensorflow
|
a68f68061e263a88321c104a6c911fe5598050a8
| 0
|
// Releases the resource-manager reference taken by LookupOrCreate in GetMap,
// if one was ever acquired.
~SparseTensorAccessingOp() override {
  if (sparse_tensors_map_) sparse_tensors_map_->Unref();
}
| null | null | 220,033
|
164426448257381627921846543105561312717
| 3
|
Replace faulty overflow check with a builder for `TensorShape`.
Prevents an integer overflow that was not caught before.
PiperOrigin-RevId: 415381595
Change-Id: I76585ddedc912bd9f4a390aeafa8e2ced1a28863
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
/*
 * Open a file through NFSv4 for a cached positive dentry.
 *
 * Returns 0 on success, -EOPENSTALE (after dropping the dentry) so the VFS
 * retries the full lookup/create/open path, or another negative errno.
 */
nfs4_file_open(struct inode *inode, struct file *filp)
{
	struct nfs_open_context *ctx;
	struct dentry *dentry = file_dentry(filp);
	struct dentry *parent = NULL;
	struct inode *dir;
	unsigned openflags = filp->f_flags;
	struct iattr attr;
	int err;
	/*
	 * If no cached dentry exists or if it's negative, NFSv4 handled the
	 * opens in ->lookup() or ->create().
	 *
	 * We only get this far for a cached positive dentry. We skipped
	 * revalidation, so handle it here by dropping the dentry and returning
	 * -EOPENSTALE. The VFS will retry the lookup/create/open.
	 */
	dprintk("NFS: open file(%pd2)\n", dentry);
	err = nfs_check_flags(openflags);
	if (err)
		return err;
	/*
	 * Access mode 3 is the Linux-special "no read/write" open; map it
	 * down by one (to the O_RDWR encoding) for the NFS OPEN request.
	 */
	if ((openflags & O_ACCMODE) == 3)
		openflags--;
	/* We can't create new files here */
	openflags &= ~(O_CREAT|O_EXCL);
	parent = dget_parent(dentry);
	dir = d_inode(parent);
	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
	err = PTR_ERR(ctx);
	if (IS_ERR(ctx))
		goto out;
	attr.ia_valid = ATTR_OPEN;
	if (openflags & O_TRUNC) {
		/* Flush dirty pages before the server-side truncate */
		attr.ia_valid |= ATTR_SIZE;
		attr.ia_size = 0;
		filemap_write_and_wait(inode->i_mapping);
	}
	inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		switch (err) {
		default:
			goto out_put_ctx;
		case -ENOENT:
		case -ESTALE:
		case -EISDIR:
		case -ENOTDIR:
		case -ELOOP:
			/* Cached dentry is stale/wrong: drop it and retry */
			goto out_drop;
		}
	}
	/* Server resolved a different object than the cached dentry */
	if (inode != d_inode(dentry))
		goto out_drop;
	nfs_file_set_open_context(filp, ctx);
	nfs_fscache_open_file(inode, filp);
	err = 0;
out_put_ctx:
	put_nfs_open_context(ctx);
out:
	dput(parent);
	return err;
out_drop:
	d_drop(dentry);
	err = -EOPENSTALE;
	goto out_put_ctx;
}
| null | null | 220,100
|
233559108962312996252493961873838685323
| 78
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
static int nfs4_setlease(struct file *file, long arg, struct file_lock **lease,
void **priv)
{
return nfs4_proc_setlease(file, arg, lease, priv);
}
| null | null | 220,101
|
106622403096591979667524053044446293860
| 5
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
nfs4_file_flush(struct file *file, fl_owner_t id)
{
struct inode *inode = file_inode(file);
errseq_t since;
dprintk("NFS: flush(%pD2)\n", file);
nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
if ((file->f_mode & FMODE_WRITE) == 0)
return 0;
/*
* If we're holding a write delegation, then check if we're required
* to flush the i/o on close. If not, then just start the i/o now.
*/
if (!nfs4_delegation_flush_on_close(inode))
return filemap_fdatawrite(file->f_mapping);
/* Flush writes to the server and return any errors */
since = filemap_sample_wb_err(file->f_mapping);
nfs_wb_all(inode);
return filemap_check_wb_err(file->f_mapping, since);
}
| null | null | 220,102
|
106203631317300480739880184491441451344
| 23
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
struct nfs_fh *src_fh, nfs4_stateid *stateid)
{
struct nfs_fattr *fattr = nfs_alloc_fattr();
struct file *filep, *res;
struct nfs_server *server;
struct inode *r_ino = NULL;
struct nfs_open_context *ctx;
struct nfs4_state_owner *sp;
char *read_name = NULL;
int len, status = 0;
server = NFS_SERVER(ss_mnt->mnt_root->d_inode);
if (!fattr)
return ERR_PTR(-ENOMEM);
status = nfs4_proc_getattr(server, src_fh, fattr, NULL);
if (status < 0) {
res = ERR_PTR(status);
goto out;
}
res = ERR_PTR(-ENOMEM);
len = strlen(SSC_READ_NAME_BODY) + 16;
read_name = kzalloc(len, GFP_KERNEL);
if (read_name == NULL)
goto out;
snprintf(read_name, len, SSC_READ_NAME_BODY, read_name_gen++);
r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, fattr);
if (IS_ERR(r_ino)) {
res = ERR_CAST(r_ino);
goto out_free_name;
}
filep = alloc_file_pseudo(r_ino, ss_mnt, read_name, O_RDONLY,
r_ino->i_fop);
if (IS_ERR(filep)) {
res = ERR_CAST(filep);
goto out_free_name;
}
ctx = alloc_nfs_open_context(filep->f_path.dentry, filep->f_mode,
filep);
if (IS_ERR(ctx)) {
res = ERR_CAST(ctx);
goto out_filep;
}
res = ERR_PTR(-EINVAL);
sp = nfs4_get_state_owner(server, ctx->cred, GFP_KERNEL);
if (sp == NULL)
goto out_ctx;
ctx->state = nfs4_get_open_state(r_ino, sp);
if (ctx->state == NULL)
goto out_stateowner;
set_bit(NFS_SRV_SSC_COPY_STATE, &ctx->state->flags);
memcpy(&ctx->state->open_stateid.other, &stateid->other,
NFS4_STATEID_OTHER_SIZE);
update_open_stateid(ctx->state, stateid, NULL, filep->f_mode);
set_bit(NFS_OPEN_STATE, &ctx->state->flags);
nfs_file_set_open_context(filep, ctx);
put_nfs_open_context(ctx);
file_ra_state_init(&filep->f_ra, filep->f_mapping->host->i_mapping);
res = filep;
out_free_name:
kfree(read_name);
out:
nfs_free_fattr(fattr);
return res;
out_stateowner:
nfs4_put_state_owner(sp);
out_ctx:
put_nfs_open_context(ctx);
out_filep:
fput(filep);
goto out_free_name;
}
| null | null | 220,103
|
234003578338278807318940048027530906614
| 83
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t count, unsigned int flags)
{
struct nfs42_copy_notify_res *cn_resp = NULL;
struct nl4_server *nss = NULL;
nfs4_stateid *cnrs = NULL;
ssize_t ret;
bool sync = false;
/* Only offload copy if superblock is the same */
if (file_in->f_op != &nfs4_file_operations)
return -EXDEV;
if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY) ||
!nfs_server_capable(file_inode(file_in), NFS_CAP_COPY))
return -EOPNOTSUPP;
if (file_inode(file_in) == file_inode(file_out))
return -EOPNOTSUPP;
/* if the copy size if smaller than 2 RPC payloads, make it
* synchronous
*/
if (count <= 2 * NFS_SERVER(file_inode(file_in))->rsize)
sync = true;
retry:
if (!nfs42_files_from_same_server(file_in, file_out)) {
/*
* for inter copy, if copy size is too small
* then fallback to generic copy.
*/
if (sync)
return -EOPNOTSUPP;
cn_resp = kzalloc(sizeof(struct nfs42_copy_notify_res),
GFP_KERNEL);
if (unlikely(cn_resp == NULL))
return -ENOMEM;
ret = nfs42_proc_copy_notify(file_in, file_out, cn_resp);
if (ret) {
ret = -EOPNOTSUPP;
goto out;
}
nss = &cn_resp->cnr_src;
cnrs = &cn_resp->cnr_stateid;
}
ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count,
nss, cnrs, sync);
out:
kfree(cn_resp);
if (ret == -EAGAIN)
goto retry;
return ret;
}
| null | null | 220,104
|
94245929449273436030542999712052062192
| 53
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
void nfs42_ssc_unregister_ops(void)
{
nfs42_ssc_unregister(&nfs4_ssc_clnt_ops_tbl);
}
| null | null | 220,105
|
69226007490268851241654306338908808370
| 4
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
struct file *dst_file, loff_t dst_off, loff_t count,
unsigned int remap_flags)
{
struct inode *dst_inode = file_inode(dst_file);
struct nfs_server *server = NFS_SERVER(dst_inode);
struct inode *src_inode = file_inode(src_file);
unsigned int bs = server->clone_blksize;
bool same_inode = false;
int ret;
/* NFS does not support deduplication. */
if (remap_flags & REMAP_FILE_DEDUP)
return -EOPNOTSUPP;
if (remap_flags & ~REMAP_FILE_ADVISORY)
return -EINVAL;
if (IS_SWAPFILE(dst_inode) || IS_SWAPFILE(src_inode))
return -ETXTBSY;
/* check alignment w.r.t. clone_blksize */
ret = -EINVAL;
if (bs) {
if (!IS_ALIGNED(src_off, bs) || !IS_ALIGNED(dst_off, bs))
goto out;
if (!IS_ALIGNED(count, bs) && i_size_read(src_inode) != (src_off + count))
goto out;
}
if (src_inode == dst_inode)
same_inode = true;
/* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */
if (same_inode) {
inode_lock(src_inode);
} else if (dst_inode < src_inode) {
inode_lock_nested(dst_inode, I_MUTEX_PARENT);
inode_lock_nested(src_inode, I_MUTEX_CHILD);
} else {
inode_lock_nested(src_inode, I_MUTEX_PARENT);
inode_lock_nested(dst_inode, I_MUTEX_CHILD);
}
/* flush all pending writes on both src and dst so that server
* has the latest data */
ret = nfs_sync_inode(src_inode);
if (ret)
goto out_unlock;
ret = nfs_sync_inode(dst_inode);
if (ret)
goto out_unlock;
ret = nfs42_proc_clone(src_file, dst_file, src_off, dst_off, count);
/* truncate inode page cache of the dst range so that future reads can fetch
* new data from server */
if (!ret)
truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1);
out_unlock:
if (same_inode) {
inode_unlock(src_inode);
} else if (dst_inode < src_inode) {
inode_unlock(src_inode);
inode_unlock(dst_inode);
} else {
inode_unlock(dst_inode);
inode_unlock(src_inode);
}
out:
return ret < 0 ? ret : count;
}
| null | null | 220,106
|
139851599081091867057839181463427925826
| 73
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
void nfs42_ssc_register_ops(void)
{
nfs42_ssc_register(&nfs4_ssc_clnt_ops_tbl);
}
| null | null | 220,107
|
112663848358405239974447616440857973724
| 4
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
static void __nfs42_ssc_close(struct file *filep)
{
struct nfs_open_context *ctx = nfs_file_open_context(filep);
ctx->state->flags = 0;
}
| null | null | 220,108
|
133955580675360560295233777385760629174
| 6
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(filep);
long ret;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
if ((mode != 0) && (mode != (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)))
return -EOPNOTSUPP;
ret = inode_newsize_ok(inode, offset + len);
if (ret < 0)
return ret;
if (mode & FALLOC_FL_PUNCH_HOLE)
return nfs42_proc_deallocate(filep, offset, len);
return nfs42_proc_allocate(filep, offset, len);
}
| null | null | 220,109
|
236769994375470539751119970709877832037
| 19
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t count, unsigned int flags)
{
ssize_t ret;
ret = __nfs4_copy_file_range(file_in, pos_in, file_out, pos_out, count,
flags);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
ret = generic_copy_file_range(file_in, pos_in, file_out,
pos_out, count, flags);
return ret;
}
| null | null | 220,110
|
241355851477577520187342880727953419165
| 13
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
linux
|
ab0fc21bc7105b54bafd85bd8b82742f9e68898a
| 0
|
static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
{
loff_t ret;
switch (whence) {
case SEEK_HOLE:
case SEEK_DATA:
ret = nfs42_proc_llseek(filep, offset, whence);
if (ret != -EOPNOTSUPP)
return ret;
fallthrough;
default:
return nfs_file_llseek(filep, offset, whence);
}
}
| null | null | 220,111
|
127559319133838142312200070204577695003
| 15
|
Revert "NFSv4: Handle the special Linux file open access mode"
This reverts commit 44942b4e457beda00981f616402a1a791e8c616e.
After secondly opening a file with O_ACCMODE|O_DIRECT flags,
nfs4_valid_open_stateid() will dereference NULL nfs4_state when lseek().
Reproducer:
1. mount -t nfs -o vers=4.2 $server_ip:/ /mnt/
2. fd = open("/mnt/file", O_ACCMODE|O_DIRECT|O_CREAT)
3. close(fd)
4. fd = open("/mnt/file", O_ACCMODE|O_DIRECT)
5. lseek(fd)
Reported-by: Lyu Tao <[email protected]>
Signed-off-by: ChenXiaoSong <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
FileFormat ClassifyFileFormat(StringPiece data) {
if (absl::StartsWith(data, kJpegMagicBytes)) return kJpgFormat;
if (absl::StartsWith(data, kPngMagicBytes)) return kPngFormat;
if (absl::StartsWith(data, kGifMagicBytes)) return kGifFormat;
if (absl::StartsWith(data, kBmpMagicBytes)) return kBmpFormat;
return kUnknownFormat;
}
| null | null | 220,162
|
200275655930054321399972655181726960056
| 7
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
void DecodeImageV2Op::DecodeBMP(const uint8* input, const int row_size,
uint8* const output, const int width,
const int height, const int output_channels,
const int input_channels, bool top_down) {
for (int i = 0; i < height; i++) {
int src_pos;
int dst_pos;
for (int j = 0; j < width; j++) {
if (!top_down) {
src_pos = ((height - 1 - i) * row_size) + j * input_channels;
} else {
src_pos = i * row_size + j * input_channels;
}
dst_pos = (i * width + j) * output_channels;
switch (input_channels) {
case 1:
output[dst_pos] = input[src_pos];
// Set 2nd and 3rd channels if user requested for 3 or 4 channels.
// Repeat 1st channel's value.
if (output_channels == 3 || output_channels == 4) {
output[dst_pos + 1] = input[src_pos];
output[dst_pos + 2] = input[src_pos];
}
// Set 4th channel (alpha) to maximum value if user requested for
// 4 channels.
if (output_channels == 4) {
output[dst_pos + 3] = UINT8_MAX;
}
break;
case 3:
// BGR -> RGB
output[dst_pos] = input[src_pos + 2];
output[dst_pos + 1] = input[src_pos + 1];
output[dst_pos + 2] = input[src_pos];
// Set 4th channel (alpha) to maximum value if the user requested for
// 4 channels and the input image has 3 channels only.
if (output_channels == 4) {
output[dst_pos + 3] = UINT8_MAX;
}
break;
case 4:
// BGRA -> RGBA
output[dst_pos] = input[src_pos + 2];
output[dst_pos + 1] = input[src_pos + 1];
output[dst_pos + 2] = input[src_pos];
// Set 4th channel only if the user requested for 4 channels. If not,
// then user requested 3 channels; skip this step.
if (output_channels == 4) {
output[dst_pos + 3] = input[src_pos + 3];
}
break;
default:
LOG(FATAL) << "Unexpected number of channels: " << input_channels;
break;
}
}
}
}
| null | null | 220,163
|
237256207565151187443987467579721155375
| 61
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
inline int32 ByteSwapInt32ForBigEndian(int32_t x) {
if (!port::kLittleEndian) {
return BYTE_SWAP_32(x);
} else {
return x;
}
}
| null | null | 220,164
|
122857016569606321798381441843438895878
| 7
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
void DecodeBmpV2(OpKernelContext* context, StringPiece input) {
OP_REQUIRES(
context, channels_ != 1,
errors::InvalidArgument(
"`channels` must be 0, 3 or 4 for BMP, but got ", channels_));
if (op_type_ != "DecodeBmp" && op_type_ != "DecodeImage") {
if (op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"DecodeAndCropJpeg operation can run on JPEG only, but "
"detected BMP."));
} else {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode BMP format using a wrong op. Use "
"`decode_bmp` or `decode_image` instead. Op used: ",
op_type_));
}
}
OP_REQUIRES(context, (32 <= input.size()),
errors::InvalidArgument("Incomplete bmp content, requires at "
"least 32 bytes to find the header "
"size, width, height, and bpp, got ",
input.size(), " bytes"));
const uint8* img_bytes = reinterpret_cast<const uint8*>(input.data());
int32_t header_size_ = internal::SubtleMustCopy(
*(reinterpret_cast<const int32*>(img_bytes + 10)));
const int32_t header_size = ByteSwapInt32ForBigEndian(header_size_);
int32_t width_ = internal::SubtleMustCopy(
*(reinterpret_cast<const int32*>(img_bytes + 18)));
const int32_t width = ByteSwapInt32ForBigEndian(width_);
int32_t height_ = internal::SubtleMustCopy(
*(reinterpret_cast<const int32*>(img_bytes + 22)));
const int32_t height = ByteSwapInt32ForBigEndian(height_);
int16_t bpp_ = internal::SubtleMustCopy(
*(reinterpret_cast<const int16*>(img_bytes + 28)));
const int16_t bpp = ByteSwapInt16ForBigEndian(bpp_);
// `channels_` is desired number of channels. `img_channels` is number of
// channels inherent in the image.
int img_channels = bpp / 8;
OP_REQUIRES(
context, (img_channels == 1 || img_channels == 3 || img_channels == 4),
errors::InvalidArgument(
"Number of channels inherent in the image must be 1, 3 or 4, was ",
img_channels));
const int requested_channels = channels_ ? channels_ : img_channels;
OP_REQUIRES(context, width > 0,
errors::InvalidArgument("Width must be positive"));
OP_REQUIRES(context, height != 0,
errors::InvalidArgument("Height must be nonzero"));
OP_REQUIRES(context, header_size >= 0,
errors::InvalidArgument("header size must be nonnegative"));
// The real requirement is < 2^31 minus some headers and channel data,
// so rounding down to something that's still ridiculously big.
OP_REQUIRES(
context,
(static_cast<int64_t>(width) * std::abs(static_cast<int64_t>(height))) <
static_cast<int64_t>(std::numeric_limits<int32_t>::max() / 8),
errors::InvalidArgument(
"Total possible pixel bytes must be less than 2^30"));
const int32_t abs_height = abs(height);
// there may be padding bytes when the width is not a multiple of 4 bytes
const int row_size = (img_channels * width + 3) / 4 * 4;
// Make sure the size of input data matches up with the total size of
// headers plus height * row_size.
int size_diff = input.size() - header_size - (row_size * abs_height);
OP_REQUIRES(
context, size_diff == 0,
errors::InvalidArgument(
"Input size should match (header_size + row_size * abs_height) but "
"they differ by ",
size_diff));
const int64_t last_pixel_offset = static_cast<int64_t>(header_size) +
(abs_height - 1) * row_size +
(width - 1) * img_channels;
// [expected file size] = [last pixel offset] + [last pixel size=channels]
const int64_t expected_file_size = last_pixel_offset + img_channels;
OP_REQUIRES(
context, (expected_file_size <= input.size()),
errors::InvalidArgument("Incomplete bmp content, requires at least ",
expected_file_size, " bytes, got ",
input.size(), " bytes"));
// if height is negative, data layout is top down
// otherwise, it's bottom up.
bool top_down = (height < 0);
// Decode image, allocating tensor once the image size is known.
Tensor* output = nullptr;
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({abs_height, width, requested_channels}), &output));
const uint8* bmp_pixels = &img_bytes[header_size];
if (data_type_ == DataType::DT_UINT8) {
DecodeBMP(bmp_pixels, row_size, output->flat<uint8>().data(), width,
abs_height, requested_channels, img_channels, top_down);
} else {
std::unique_ptr<uint8[]> buffer(
new uint8[height * width * requested_channels]);
DecodeBMP(bmp_pixels, row_size, buffer.get(), width, abs_height,
requested_channels, img_channels, top_down);
TTypes<uint8, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
requested_channels);
// Convert the raw uint8 buffer to desired dtype.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
if (data_type_ == DataType::DT_UINT16) {
uint16 scale = floor((std::numeric_limits<uint16>::max() + 1) /
(std::numeric_limits<uint8>::max() + 1));
// Fill output tensor with desired dtype.
output->tensor<uint16, 3>().device(device) = buf.cast<uint16>() * scale;
} else if (data_type_ == DataType::DT_FLOAT) {
float scale = 1. / std::numeric_limits<uint8>::max();
// Fill output tensor with desired dtype.
output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
}
}
}
| null | null | 220,165
|
254116344370308832033040754523026789892
| 133
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
explicit DecodeImageV2Op(OpKernelConstruction* context) : OpKernel(context) {
// Keep track of op string information because:
// [1] Currently by the API, PNG, JPEG and GIF can decode each other and
// depending on the op type, we need to return either 3-D or 4-D shapes.
// [2] Different ops have different attributes. e.g. `DecodeImage` op has
// `expand_animations` attribute that other ops don't.
// `DecodeAndDropJpeg` also has additional attributes.
op_type_ = type_string();
// Validate op type.
OP_REQUIRES(context,
op_type_ == "DecodeJpeg" || op_type_ == "DecodeAndCropJpeg" ||
op_type_ == "DecodePng" || op_type_ == "DecodeGif" ||
op_type_ == "DecodeBmp" || op_type_ == "DecodeImage",
errors::InvalidArgument("Bad op type ", op_type_));
// Get attributes from `DecodeJpeg` and `DecodeAndCropJpeg` op
// invocations. For `DecodeImage` op, set JPEG decoding setting to TF
// default.
if (op_type_ == "DecodeJpeg" || op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES_OK(context, context->GetAttr("ratio", &flags_.ratio));
OP_REQUIRES(context,
flags_.ratio == 1 || flags_.ratio == 2 || flags_.ratio == 4 ||
flags_.ratio == 8,
errors::InvalidArgument("ratio must be 1, 2, 4, or 8, got ",
flags_.ratio));
OP_REQUIRES_OK(context, context->GetAttr("fancy_upscaling",
&flags_.fancy_upscaling));
OP_REQUIRES_OK(context,
context->GetAttr("try_recover_truncated",
&flags_.try_recover_truncated_jpeg));
OP_REQUIRES_OK(context,
context->GetAttr("acceptable_fraction",
&flags_.min_acceptable_fraction));
string dct_method;
OP_REQUIRES_OK(context, context->GetAttr("dct_method", &dct_method));
OP_REQUIRES(
context,
(dct_method.empty() || dct_method == "INTEGER_FAST" ||
dct_method == "INTEGER_ACCURATE"),
errors::InvalidArgument("dct_method must be one of "
"{'', 'INTEGER_FAST', 'INTEGER_ACCURATE'}"));
// The TensorFlow-chosen default for JPEG decoding is IFAST, sacrificing
// image quality for speed.
if (dct_method.empty() || dct_method == "INTEGER_FAST") {
flags_.dct_method = JDCT_IFAST;
} else if (dct_method == "INTEGER_ACCURATE") {
flags_.dct_method = JDCT_ISLOW;
}
} else {
flags_ = jpeg::UncompressFlags();
flags_.dct_method = JDCT_IFAST;
}
// Get `dtype` attribute from `DecodePng` or `DecodeImage` op invocations.
if (op_type_ == "DecodePng" || op_type_ == "DecodeImage") {
OP_REQUIRES_OK(context, context->GetAttr("dtype", &data_type_));
if (op_type_ == "DecodePng") {
OP_REQUIRES(
context,
data_type_ == DataType::DT_UINT8 ||
data_type_ == DataType::DT_UINT16,
errors::InvalidArgument(
"`dtype` for `DecodePng` must be unit8, unit16 but got: ",
data_type_));
} else {
OP_REQUIRES(context,
data_type_ == DataType::DT_UINT8 ||
data_type_ == DataType::DT_UINT16 ||
data_type_ == DataType::DT_FLOAT,
errors::InvalidArgument("`dtype` for `DecodeImage` must be "
"unit8, unit16, float but got: ",
data_type_));
OP_REQUIRES_OK(context, context->GetAttr("expand_animations",
&expand_animations_));
}
}
// Get `channels` attribute for all ops except `DecodeGif` op.
// `DecodeGif` doesn't have `channels` attribute but it supports 3
// channels by default.
if (op_type_ != "DecodeGif") {
OP_REQUIRES_OK(context, context->GetAttr("channels", &channels_));
OP_REQUIRES(
context,
channels_ == 0 || channels_ == 1 || channels_ == 3 || channels_ == 4,
errors::InvalidArgument("`channels` must be 0, 1, 3 or 4 but got ",
channels_));
} else {
channels_ = 3;
}
}
| null | null | 220,166
|
80418547912824608740287493797742624973
| 92
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
void DecodeJpegV2(OpKernelContext* context, StringPiece input) {
OP_REQUIRES(context, channels_ == 0 || channels_ == 1 || channels_ == 3,
errors::InvalidArgument("JPEG does not support 4 channels"));
// Use local copy of flags to avoid race condition as the class member is
// shared among different invocations.
jpeg::UncompressFlags flags = flags_;
flags.components = channels_;
if (op_type_ == "DecodeAndCropJpeg") {
flags.crop = true;
// Update flags to include crop window.
const Tensor& crop_window = context->input(1);
OP_REQUIRES(context, crop_window.dims() == 1,
errors::InvalidArgument("crop_window must be 1-D, got shape ",
crop_window.shape().DebugString()));
OP_REQUIRES(context, crop_window.dim_size(0) == 4,
errors::InvalidArgument("crop_size must have four elements ",
crop_window.shape().DebugString()));
auto crop_window_vec = crop_window.vec<int32>();
flags.crop_y = crop_window_vec(0);
flags.crop_x = crop_window_vec(1);
flags.crop_height = crop_window_vec(2);
flags.crop_width = crop_window_vec(3);
} else if (op_type_ == "DecodeBmp") {
// TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
// because currently `decode_(jpeg|png|gif)` ops can decode any one of
// jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
// anything but bmp formats. This behavior needs to be revisited. For more
// details, please refer to the bug.
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode JPEG format using DecodeBmp op. Use "
"`decode_jpeg` or `decode_image` instead."));
}
// Output tensor and the image buffer size.
Tensor* output = nullptr;
int buffer_size = 0;
// Decode JPEG. Directly allocate to the output buffer if data type is
// uint8 (to save extra copying). Otherwise, allocate a new uint8 buffer
// with buffer size. `jpeg::Uncompress` supports unit8 only.
uint8* buffer = jpeg::Uncompress(
input.data(), input.size(), flags, nullptr /* nwarn */,
[&](int width, int height, int channels) -> uint8* {
buffer_size = height * width * channels;
Status status;
// By the existing API, we support decoding JPEG with `DecodeGif`
// op. We need to make sure to return 4-D shapes when using
// `DecodeGif`.
if (op_type_ == "DecodeGif") {
status = context->allocate_output(
0, TensorShape({1, height, width, channels}), &output);
} else {
status = context->allocate_output(
0, TensorShape({height, width, channels}), &output);
}
if (!status.ok()) {
VLOG(1) << status;
context->SetStatus(status);
return nullptr;
}
if (data_type_ == DataType::DT_UINT8) {
return output->flat<uint8>().data();
} else {
return new uint8[buffer_size];
}
});
OP_REQUIRES(
context, buffer,
errors::InvalidArgument(
"jpeg::Uncompress failed. Invalid JPEG data or crop window."));
// For when desired data type if unit8, the output buffer is already
// allocated during the `jpeg::Uncompress` call above; return.
if (data_type_ == DataType::DT_UINT8) {
return;
}
// Make sure we don't forget to deallocate `buffer`.
std::unique_ptr<uint8[]> buffer_unique_ptr(buffer);
// Convert uint8 image data to desired data type.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
TTypes<uint8>::UnalignedConstFlat buffer_view(buffer, buffer_size);
if (data_type_ == DataType::DT_UINT16) {
uint16 scale = floor((std::numeric_limits<uint16>::max() + 1) /
(std::numeric_limits<uint8>::max() + 1));
// Fill output tensor with desired dtype.
output->flat<uint16>().device(device) =
buffer_view.cast<uint16>() * scale;
} else if (data_type_ == DataType::DT_FLOAT) {
float scale = 1. / std::numeric_limits<uint8>::max();
// Fill output tensor with desired dtype.
output->flat<float>().device(device) = buffer_view.cast<float>() * scale;
}
}
| null | null | 220,167
|
329515485235211082463617117455052289057
| 100
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
// Decodes a PNG-encoded image from `input` into this op's output tensor.
// Decodes at 8 bits/channel when the output dtype is uint8 and at 16
// bits/channel otherwise (both the uint16 and float paths decode at 16 bits;
// the float path rescales afterwards).
// Fix: removed a leftover debug `std::cerr` print from the cleanup lambda,
// which wrote to stderr on every decode (including success paths).
void DecodePngV2(OpKernelContext* context, StringPiece input) {
  int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
  png::DecodeContext decode;
  OP_REQUIRES(
      context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
      errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));

  // If we reach this point, then there is data in `decode` which must be
  // freed by the time we end execution in this function. We cannot call
  // `png::CommonFreeDecode()` before an `OP_REQUIRES` because if
  // `OP_REQUIRES` constraint is satisfied then the data would be freed
  // prematurely. Instead, let's use a `Cleanup` object.
  auto cleanup =
      gtl::MakeCleanup([&decode]() { png::CommonFreeDecode(&decode); });

  // Verify that width and height are not too large:
  // - verify width and height don't overflow int.
  // - width can later be multiplied by channels_ and sizeof(uint16), so
  //   verify single dimension is not too large.
  // - verify when width and height are multiplied together, there are a few
  //   bits to spare as well.
  const int width = static_cast<int>(decode.width);
  const int height = static_cast<int>(decode.height);
  const int64_t total_size =
      static_cast<int64_t>(width) * static_cast<int64_t>(height);
  if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
      width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
      height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
    OP_REQUIRES(context, false,
                errors::InvalidArgument("PNG size too large for int: ",
                                        decode.width, " by ", decode.height));
  }

  Tensor* output = nullptr;
  // By the existing API, we support decoding PNG with `DecodeGif` op.
  // We need to make sure to return 4-D shapes when using `DecodeGif`.
  if (op_type_ == "DecodeGif") {
    OP_REQUIRES_OK(
        context,
        context->allocate_output(
            0, TensorShape({1, height, width, decode.channels}), &output));
  } else {
    OP_REQUIRES_OK(
        context,
        context->allocate_output(
            0, TensorShape({height, width, decode.channels}), &output));
  }

  if (op_type_ == "DecodeBmp") {
    // TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
    // because currently `decode_(jpeg|png|gif)` ops can decode any one of
    // jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
    // anything but bmp formats. This behavior needs to be revisited. For more
    // details, please refer to the bug.
    OP_REQUIRES(context, false,
                errors::InvalidArgument(
                    "Trying to decode PNG format using DecodeBmp op. Use "
                    "`decode_png` or `decode_image` instead."));
  } else if (op_type_ == "DecodeAndCropJpeg") {
    OP_REQUIRES(context, false,
                errors::InvalidArgument(
                    "DecodeAndCropJpeg operation can run on JPEG only, but "
                    "detected PNG."));
  }

  if (data_type_ == DataType::DT_UINT8) {
    OP_REQUIRES(
        context,
        png::CommonFinishDecode(
            reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
            decode.channels * width * sizeof(uint8), &decode),
        errors::InvalidArgument("Invalid PNG data, size ", input.size()));
  } else if (data_type_ == DataType::DT_UINT16) {
    OP_REQUIRES(
        context,
        png::CommonFinishDecode(
            reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
            decode.channels * width * sizeof(uint16), &decode),
        errors::InvalidArgument("Invalid PNG data, size ", input.size()));
  } else if (data_type_ == DataType::DT_FLOAT) {
    // `png::CommonFinishDecode` does not support `float`. First allocate
    // uint16 buffer for the image and decode in uint16 (lossless). Wrap the
    // buffer in `unique_ptr` so that we don't forget to delete the buffer.
    std::unique_ptr<uint16[]> buffer(
        new uint16[height * width * decode.channels]);
    OP_REQUIRES(
        context,
        png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
                                decode.channels * width * sizeof(uint16),
                                &decode),
        errors::InvalidArgument("Invalid PNG data, size ", input.size()));

    // Convert uint16 image data to desired data type.
    // Use eigen threadpooling to speed up the copy operation.
    const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
    TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
                                                decode.channels);
    float scale = 1. / std::numeric_limits<uint16>::max();
    // Fill output tensor with desired dtype.
    output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
  }
}
| null | null | 220,168
|
127581059212809583956166344576678605057
| 104
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
// Decodes a GIF-encoded image from `input` into this op's output tensor.
// The output shape depends on op_type_: 3-D for single-frame GIFs decoded
// via DecodePng/DecodeJpeg or DecodeImage without expand_animations_,
// 4-D ({num_frames, h, w, c}) for DecodeGif / DecodeImage with
// expand_animations_. For non-uint8 dtypes the raw uint8 decode buffer is
// converted (scaled) into the output tensor afterwards.
void DecodeGifV2(OpKernelContext* context, StringPiece input) {
  // GIF has 3 channels.
  OP_REQUIRES(context, channels_ == 0 || channels_ == 3,
              errors::InvalidArgument("channels must be 0 or 3 for GIF, got ",
                                      channels_));

  if (op_type_ == "DecodeBmp") {
    // TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
    // because currently `decode_(jpeg|png|gif)` ops can decode any one of
    // jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
    // anything but bmp formats. This behavior needs to be revisited. For more
    // details, please refer to the bug.
    OP_REQUIRES(context, false,
                errors::InvalidArgument(
                    "Trying to decode GIF format using DecodeBmp op. Use "
                    "`decode_gif` or `decode_image` instead."));
  } else if (op_type_ == "DecodeAndCropJpeg") {
    OP_REQUIRES(context, false,
                errors::InvalidArgument(
                    "DecodeAndCropJpeg operation can run on JPEG only, but "
                    "detected GIF."));
  }

  // Decode GIF, allocating tensor if dtype is uint8, otherwise defer tensor
  // allocation til after dtype conversion is done. `gif`::Decode` supports
  // uint8 only.
  Tensor* output = nullptr;
  int buffer_size = 0;
  string error_string;
  // The allocator callback below either hands gif::Decode the output
  // tensor's storage directly (uint8 case) or a temporary heap buffer
  // (other dtypes); it returns nullptr on allocation/shape failure.
  uint8* buffer = gif::Decode(
      input.data(), input.size(),
      [&](int num_frames, int width, int height, int channels) -> uint8* {
        buffer_size = num_frames * height * width * channels;

        Status status;
        // By the existing API, we support decoding GIF with `decode_jpeg` or
        // with `decode_png` if the GIF is a single-frame GIF (non-animated).
        // We need to make sure to return 3-D shapes when using in this case.
        if (op_type_ == "DecodePng" || op_type_ == "DecodeJpeg") {
          if (num_frames == 1) {
            status = context->allocate_output(
                0, TensorShape({height, width, channels}), &output);
          } else {
            status = errors::InvalidArgument(
                "Got ", num_frames, " frames, but animated gifs ",
                "can only be decoded by tf.io.decode_gif or ",
                "tf.io.decode_image");
          }
        } else if (op_type_ == "DecodeGif" ||
                   (op_type_ == "DecodeImage" && expand_animations_)) {
          status = context->allocate_output(
              0, TensorShape({num_frames, height, width, channels}), &output);
        } else if (op_type_ == "DecodeImage" && !expand_animations_) {
          status = context->allocate_output(
              0, TensorShape({height, width, channels}), &output);
        } else {
          status = errors::InvalidArgument("Bad op type ", op_type_);
        }
        if (!status.ok()) {
          VLOG(1) << status;
          context->SetStatus(status);
          return nullptr;
        }

        if (data_type_ == DataType::DT_UINT8) {
          return output->flat<uint8>().data();
        } else {
          return new uint8[buffer_size];
        }
      },
      &error_string, expand_animations_);

  OP_REQUIRES(context, buffer,
              errors::InvalidArgument("Invalid GIF data (size ", input.size(),
                                      "), ", error_string));

  // For when desired data type is uint8, the output buffer is already
  // allocated during the `gif::Decode` call above; return.
  if (data_type_ == DataType::DT_UINT8) {
    return;
  }

  // Make sure we don't forget to deallocate `buffer`.
  std::unique_ptr<uint8[]> buffer_unique_ptr(buffer);

  // Convert the raw uint8 buffer to desired dtype.
  // Use eigen threadpooling to speed up the copy operation.
  TTypes<uint8>::UnalignedConstFlat buffer_view(buffer, buffer_size);
  const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
  if (data_type_ == DataType::DT_UINT16) {
    uint16 scale = floor((std::numeric_limits<uint16>::max() + 1) /
                         (std::numeric_limits<uint8>::max() + 1));
    // Fill output tensor with desired dtype.
    output->flat<uint16>().device(device) =
        buffer_view.cast<uint16>() * scale;
  } else if (data_type_ == DataType::DT_FLOAT) {
    float scale = 1. / std::numeric_limits<uint8>::max();
    // Fill output tensor with desired dtype.
    output->flat<float>().device(device) = buffer_view.cast<float>() * scale;
  }
}
| null | null | 220,169
|
181542132700962732917178655039516039938
| 100
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
// Returns `x` byte-swapped on big-endian hosts; identity on little-endian.
inline int16 ByteSwapInt16ForBigEndian(int16_t x) {
  return port::kLittleEndian ? x : BYTE_SWAP_16(x);
}
| null | null | 220,170
|
123699840778751448725075579797003308327
| 7
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
ab51e5b813573dc9f51efa335aebcf2994125ee9
| 0
|
// Op entry point: validates the scalar string input, classifies its image
// format by magic bytes, and dispatches to the matching decoder.
// Rejects empty inputs, inputs larger than INT_MAX, and unknown formats.
void Compute(OpKernelContext* context) override {
  const Tensor& contents = context->input(0);
  OP_REQUIRES(
      context, TensorShapeUtils::IsScalar(contents.shape()),
      errors::InvalidArgument("`contents` must be scalar but got shape",
                              contents.shape().DebugString()));
  const StringPiece input = contents.scalar<tstring>()();
  OP_REQUIRES(context, !input.empty(),
              errors::InvalidArgument("Input is empty."));
  // Guard: downstream decoders take the size as int.
  OP_REQUIRES(context, input.size() <= std::numeric_limits<int>::max(),
              errors::InvalidArgument(
                  "Input contents are too large for int: ", input.size()));

  // Parse magic bytes to determine file format.
  switch (ClassifyFileFormat(input)) {
    case kJpgFormat:
      DecodeJpegV2(context, input);
      break;
    case kPngFormat:
      DecodePngV2(context, input);
      break;
    case kGifFormat:
      DecodeGifV2(context, input);
      break;
    case kBmpFormat:
      DecodeBmpV2(context, input);
      break;
    case kUnknownFormat:
      OP_REQUIRES(context, false,
                  errors::InvalidArgument("Unknown image file format. One of "
                                          "JPEG, PNG, GIF, BMP required."));
      break;
  }
}
| null | null | 220,171
|
134890628989413498912780776226337504582
| 34
|
Prevent memory leak in decoding PNG images.
PiperOrigin-RevId: 409300653
Change-Id: I6182124c545989cef80cefd439b659095920763b
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Reports input `idx` of this node as the producing (node, output-slot)
// pair. Forwards any lookup error from `input_edge`.
Status Node::input_tensor(int idx, OutputTensor* t) const {
  const Edge* edge;
  TF_RETURN_IF_ERROR(input_edge(idx, &edge));
  DCHECK(edge != nullptr);
  *t = OutputTensor(edge->src(), edge->src_output());
  return Status::OK();
}
| null | null | 220,172
|
222128822092323596434650272926207540811
| 7
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Hash for InputTensor: combines the node-pointer hash with the slot-index
// hash.
uint64 InputTensor::Hash::operator()(InputTensor const& s) const {
  const uint64 node_hash = std::hash<const Node*>()(s.node);
  const uint64 index_hash = std::hash<int>()(s.index);
  return Hash64Combine(node_hash, index_hash);
}
| null | null | 220,173
|
268719964837569891328547809598531669049
| 4
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Adds an edge from source:x to dest:y, reusing a recycled Edge object when
// available. Returns the (graph-owned) edge. After inserting a data edge,
// re-runs forward type inference on the destination once it has a full set
// of incoming edges.
const Edge* Graph::AddEdge(Node* source, int x, Node* dest, int y) {
  TF_DCHECK_OK(IsValidNode(source)) << source->DebugString();
  TF_DCHECK_OK(IsValidNode(dest)) << dest->DebugString();

  // source/sink must only be linked via control slots, and
  // control slots must only be linked to control slots.
  if (source == source_node() || dest == sink_node() || x == kControlSlot ||
      y == kControlSlot) {
    DCHECK_EQ(x, kControlSlot) << source->DebugString();
    DCHECK_EQ(y, kControlSlot) << dest->DebugString();
  }

  // Reuse a previously freed Edge if possible; otherwise arena-allocate.
  Edge* e = nullptr;
  if (free_edges_.empty()) {
    e = new (arena_.Alloc(sizeof(Edge))) Edge;  // placement new
  } else {
    e = free_edges_.back();
    free_edges_.pop_back();
  }
  e->id_ = edges_.size();
  e->src_ = source;
  e->dst_ = dest;
  e->src_output_ = x;
  e->dst_input_ = y;
  // Inserting the same edge twice would be a graph-invariant violation.
  CHECK(source->out_edges_.insert(e).second);
  CHECK(dest->in_edges_.insert(e).second);
  edges_.push_back(e);
  ++num_edges_;

  if (!e->IsControlEdge()) {
    if (dest->in_edges_.size() >= dest->props_->input_types.size()) {
      // Note: this only produces consistent results at graph construction,
      // and only when all incoming edges are up-to-date.
      // If the graph is subsequently modified, or if the node is added before
      // any of its upstream nodes, this type information would change as well.
      // In general, graph transformations should run shole-graph type inference
      // when done, and should not rely on types being fully up to date
      // after each AddNode.
      // TODO(mdan): Should we even run type inference here any more?
      dest->RunForwardTypeInference();
    }
  }

  return e;
}
| null | null | 220,174
|
151239028286673176550452398025638880934
| 45
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Drops any full-type annotation carried on this node's NodeDef.
// Avoids the copy-on-write cost when no annotation is present.
void Node::ClearTypeInfo() {
  if (!props_->node_def.has_experimental_type()) return;
  MaybeCopyOnWrite();
  props_->node_def.clear_experimental_type();
}
| null | null | 220,175
|
151238099496368934321498740476918288375
| 6
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Range over the nodes reached by this node's outgoing edges.
gtl::iterator_range<NeighborIter> Node::out_nodes() const {
  NeighborIter first(out_edges_.begin(), false);
  NeighborIter last(out_edges_.end(), false);
  return gtl::make_range(first, last);
}
| null | null | 220,176
|
228256421091009635200746755320396287324
| 4
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Removes attribute `name` from the node's NodeDef (no-op if absent).
void Node::ClearAttr(const std::string& name) {
  MaybeCopyOnWrite();
  auto* attrs = props_->node_def.mutable_attr();
  attrs->erase(name);
}
| null | null | 220,177
|
295014743024982824832398426212326547946
| 4
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Finds the incoming data edge that feeds input slot `idx` and stores it in
// `*e`. Returns InvalidArgument for out-of-range `idx` and NotFound if no
// edge currently feeds that slot.
Status Node::input_edge(int idx, const Edge** e) const {
  if (idx < 0 || idx >= num_inputs()) {
    return errors::InvalidArgument("Invalid input_edge index: ", idx, ", Node ",
                                   name(), " only has ", num_inputs(),
                                   " inputs.");
  }

  // Linear search over in_edges(). Typically there are few edges, so this is
  // cheap; if it ever becomes a bottleneck, an index of edges keyed by
  // dst_input could be built at construction time instead.
  for (const Edge* in : in_edges()) {
    if (in->dst_input() != idx) continue;
    *e = in;
    return Status::OK();
  }

  return errors::NotFound("Could not find input edge ", idx, " for ", name());
}
| null | null | 220,178
|
54213095586084851720511108334120505483
| 25
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Replaces the experimental debug info's original_func_names list with
// `names` (cleared first, left empty when `names` is empty).
void Node::set_original_func_names(const std::vector<std::string>& names) {
  MaybeCopyOnWrite();
  auto* debug_info = props_->node_def.mutable_experimental_debug_info();
  debug_info->clear_original_func_names();
  if (names.empty()) return;
  *debug_info->mutable_original_func_names() = {names.begin(), names.end()};
}
| null | null | 220,179
|
319937459566660791086929718755777944951
| 9
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Convenience constructor: extracts name and (optional) experimental debug
// info from a NodeDef and delegates to the field-wise constructor.
NodeDebugInfo::NodeDebugInfo(const NodeDef& ndef)
    : NodeDebugInfo(ndef.name(), ndef.has_experimental_debug_info(),
                    ndef.experimental_debug_info()) {}
| null | null | 220,180
|
11271278126087689943808313726117030717
| 3
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Removes edge `e` from the graph: detaches it from both endpoints' edge
// sets, clears its slot in edges_, and recycles the Edge object for reuse.
// CHECKs enforce that `e` is a live edge of this graph.
void Graph::RemoveEdge(const Edge* e) {
  TF_DCHECK_OK(IsValidNode(e->src_)) << e->src_->DebugString();
  TF_DCHECK_OK(IsValidNode(e->dst_)) << e->dst_->DebugString();
  CHECK_EQ(e->src_->out_edges_.erase(e), size_t{1});
  CHECK_EQ(e->dst_->in_edges_.erase(e), size_t{1});
  CHECK_EQ(e, edges_[e->id_]);
  CHECK_GT(num_edges_, 0);

  edges_[e->id_] = nullptr;
  RecycleEdge(e);
  --num_edges_;

  if (!e->IsControlEdge()) {
    // This may clear the node type if enough edges are removed.
    e->dst_->RunForwardTypeInference();
  }
}
| null | null | 220,181
|
164619838863775814498447909190189335026
| 17
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Generates a fresh node name of the form "<prefix>/_<counter>" using a
// monotonically increasing per-graph counter.
std::string Graph::NewName(StringPiece prefix) {
  const auto count = name_counter_++;
  return strings::StrCat(prefix, "/_", count);
}
| null | null | 220,182
|
314279242421994967020293734897458042117
| 3
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Sets the device requested in the node's NodeDef (copy-on-write if the
// properties are shared with other nodes).
void Node::set_requested_device(const std::string& device) {
  MaybeCopyOnWrite();
  props_->node_def.set_device(device);
}
| null | null | 220,183
|
169251717655168160912852457378885553431
| 4
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Connects `new_src:new_src_index` as the next missing data input of the
// While node `dst`, and records the new input in dst's NodeDef so the def
// stays in sync with the in-memory edge.
// Returns Internal if `dst` is not a While op, or an error if either
// endpoint tensor is invalid.
// Fix: the Internal error message referred to a nonexistent
// "AddWhileEdgeHack"; it now names this function.
Status Graph::AddWhileInputHack(Node* new_src, int new_src_index, Node* dst) {
  if (!dst->IsWhileNode()) {
    return errors::Internal(
        "dst argument to AddWhileInputHack should be a While op, got: ",
        dst->DebugString());
  }
  TF_RETURN_IF_ERROR(IsValidOutputTensor(new_src, new_src_index));
  // Find the current number of data inputs. We'll add the new edge to the next
  // missing data input.
  int dst_index = 0;
  for (const Edge* edge : dst->in_edges()) {
    if (edge->IsControlEdge()) continue;
    ++dst_index;
  }
  TF_RETURN_IF_ERROR(IsValidInputTensor(dst, dst_index));
  AddEdge(new_src, new_src_index, dst, dst_index);
  // Mirror the new edge into the NodeDef's input list ("name:slot" syntax).
  dst->MaybeCopyOnWrite();
  dst->props_->node_def.add_input(
      strings::StrCat(new_src->name(), ":", new_src_index));
  return Status::OK();
}
| null | null | 220,184
|
176624140071501764374320554244517777404
| 21
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Returns the node's name as stored in its NodeDef.
const std::string& Node::name() const { return props_->node_def.name(); }
| null | null | 220,185
|
215850377228243398834038896544694123286
| 1
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Returns `node` to the graph's free list for reuse: clears its slot in
// nodes_, resets the node's state, and decrements the live-node count.
void Graph::ReleaseNode(Node* node) {
  TF_DCHECK_OK(IsValidNode(node)) << node->DebugString();
  nodes_[node->id()] = nullptr;
  free_nodes_.push_back(node);
  --num_nodes_;
  node->Clear();
}
| null | null | 220,186
|
257044149848112466739208598462573961277
| 7
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Appends an input reference to `dst` in NodeDef syntax:
// "^name" for control inputs, plain "name" for output slot 0, and
// "name:slot" for any other slot.
void AddInput(NodeDef* dst, StringPiece src_name, int src_slot) {
  switch (src_slot) {
    case Graph::kControlSlot:
      dst->add_input(strings::StrCat("^", src_name));
      break;
    case 0:
      dst->add_input(src_name.data(), src_name.size());
      break;
    default:
      dst->add_input(strings::StrCat(src_name, ":", src_slot));
      break;
  }
}
| null | null | 220,187
|
197653396410109083653632508147288930183
| 9
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Resets the node to its uninitialized state so it can be recycled by the
// graph's node free list (see Graph::ReleaseNode).
void Node::Clear() {
  in_edges_.clear();
  out_edges_.clear();
  // -1 ids mark the node as not belonging to any graph slot.
  id_ = -1;
  cost_id_ = -1;
  class_ = NC_UNINITIALIZED;
  props_.reset();
  assigned_device_name_index_ = 0;
}
| null | null | 220,188
|
95601681008553803765438284049798646538
| 9
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Returns the OpDef this node was constructed against.
const OpDef& Node::op_def() const { return *props_->op_def; }
| null | null | 220,189
|
84319288909660154026269617028550224472
| 1
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Human-readable one-line summary of the node for logging and error
// messages: name, id, and (for op nodes) device placement and NodeDef.
std::string Node::DebugString() const {
  std::string result = strings::StrCat("{name:'", name(), "' id:", id_);
  if (IsSource()) {
    strings::StrAppend(&result, " source}");
    return result;
  }
  if (IsSink()) {
    strings::StrAppend(&result, " sink}");
    return result;
  }
  strings::StrAppend(&result, " op device:", "{requested: '",
                     requested_device(), "', assigned: '",
                     assigned_device_name(), "'}", " def:{",
                     SummarizeNode(*this), "}}");
  return result;
}
| null | null | 220,190
|
260241675154688003479773152888871557235
| 13
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Removes control edge `e` from the graph. When neither endpoint is the
// synthetic source/sink node, also deletes the matching "^src" entry from
// the destination's NodeDef input list before removing the edge itself.
void Graph::RemoveControlEdge(const Edge* e) {
  if (!e->src_->IsSource() && !e->dst_->IsSink()) {
    e->dst_->MaybeCopyOnWrite();
    const std::string control_input = strings::StrCat("^", e->src_->name());
    auto* inputs = e->dst_->props_->node_def.mutable_input();
    for (auto it = inputs->begin(); it != inputs->end(); ++it) {
      if (*it != control_input) continue;
      inputs->erase(it);
      break;
    }
  }
  RemoveEdge(e);
}
| null | null | 220,191
|
236380040219005851717040416118004890657
| 14
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Returns dst's incoming edge feeding input slot `index`, or nullptr when
// no such edge exists.
const Edge* FindEdge(const Node* dst, int index) {
  for (const Edge* edge : dst->in_edges()) {
    if (edge->dst_input() == index) {
      return edge;
    }
  }
  return nullptr;
}
| null | null | 220,192
|
279302332503984637497689590389179007036
| 6
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Allocates (or recycles) a Node, initializes it with `props` and the next
// available id, and registers it in nodes_. The cost id is inherited from
// `cost_node` when provided, otherwise it equals the new node's own id.
Node* Graph::AllocateNode(std::shared_ptr<NodeProperties> props,
                          const Node* cost_node, Node::NodeClass node_class) {
  Node* node = nullptr;
  if (free_nodes_.empty()) {
    node = new (arena_.Alloc(sizeof(Node))) Node;  // placement new
  } else {
    node = free_nodes_.back();
    free_nodes_.pop_back();
  }
  node->graph_ = this;
  const int id = nodes_.size();
  int cost_id = cost_node ? cost_node->cost_id() : id;
  node->Initialize(id, cost_id, std::move(props), node_class);
  nodes_.push_back(node);
  ++num_nodes_;

  return node;
}
| null | null | 220,193
|
237302880382373953354144775701846769113
| 17
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Interns `device_name` into the per-graph device-name table and returns
// its stable index. Index 0 is reserved for the empty name.
int Graph::InternDeviceName(const std::string& device_name) {
  // Special case, very common.  Also, this allows us to use a single map
  // lookup below, instead of two.  The 'if (index_cell > 0)' test below
  // relies on this check.
  if (device_name.empty()) {
    return 0;
  }

  // operator[] value-initializes a new entry to 0, so index_cell > 0 means
  // the name was already interned.
  int& index_cell = device_names_map_[device_name];
  if (index_cell > 0) {
    return index_cell;
  }

  const int index = device_names_map_.size();
  index_cell = index;
  device_names_.push_back(device_name);
  return index;
}
| null | null | 220,194
|
111896480430550963789205950828476726464
| 18
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
std::unordered_map<std::string, Node*> Graph::BuildNodeNameIndex() const {
std::unordered_map<std::string, Node*> result;
for (Node* n : nodes()) {
result[n->name()] = n;
}
return result;
}
| null | null | 220,195
|
40130966929469334371231861891952149526
| 7
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Puts an edge on the free list so AddEdge can reuse the Edge object.
void Graph::RecycleEdge(const Edge* e) {
  free_edges_.push_back(const_cast<Edge*>(e));
}
| null | null | 220,196
|
301806930579664263581958752693966893958
| 3
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Copies `src` into this graph, which must be empty (only source/sink
// present). Copies versions, all op nodes, and all edges; node identity is
// tracked through node_map so edges reconnect the copied nodes.
void Graph::Copy(const Graph& src) {
  SetConstructionContext(src.GetConstructionContextInternal());
  for (Node* n : nodes()) {
    CHECK(n->IsSource() || n->IsSink()) << "*dest must be empty";
  }

  // Copy GraphDef versions
  set_versions(src.versions());

  // Copy the nodes.
  // "Node in src" -> "Node in *dest"
  gtl::FlatMap<const Node*, Node*> node_map;
  node_map.reserve(src.num_nodes());
  node_map[src.source_node()] = source_node();
  node_map[src.sink_node()] = sink_node();
  for (Node* n : src.op_nodes()) {
    auto copy = CopyNode(n);
    copy->in_edges_.reserve(n->in_edges().size());
    copy->out_edges_.reserve(n->out_edges().size());
    node_map[n] = copy;
  }

  // Copy the edges
  edges_.reserve(src.num_edges());
  for (const Edge* e : src.edges()) {
    Node* src_copy = node_map[e->src()];
    Node* dst_copy = node_map[e->dst()];
    AddEdge(src_copy, e->src_output(), dst_copy, e->dst_input());
  }
}
| null | null | 220,197
|
11030426970600913307075188673392398501
| 30
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Validates that `node` belongs to this graph and that `idx` names one of
// its input slots. Returns OutOfRange on a bad index.
Status Graph::IsValidInputTensor(const Node* node, int idx) const {
  TF_RETURN_IF_ERROR(IsValidNode(node));
  if (idx < 0 || idx >= node->num_inputs()) {
    return errors::OutOfRange("Node '", node->name(), "' (type: '",
                              node->op_def().name(),
                              "', num of inputs: ", node->num_inputs(),
                              ") does not have ", "input ", idx);
  }
  return Status::OK();
}
| null | null | 220,198
|
127168947867880093575292116366917593853
| 10
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Fills `input_edges` with this node's data edges indexed by input slot.
// Errors if any slot number is out of range, duplicated, or missing, so a
// successful return guarantees a complete, one-edge-per-slot vector.
Status Node::input_edges(std::vector<const Edge*>* input_edges) const {
  input_edges->clear();
  input_edges->resize(num_inputs(), nullptr);
  for (const Edge* edge : in_edges()) {
    if (edge->IsControlEdge()) continue;
    if (edge->dst_input() < 0 || edge->dst_input() >= num_inputs()) {
      return errors::Internal("Invalid edge input number ", edge->dst_input());
    }
    if ((*input_edges)[edge->dst_input()] != nullptr) {
      return errors::Internal("Duplicate edge input number: ",
                              edge->dst_input());
    }
    (*input_edges)[edge->dst_input()] = edge;
  }

  // Every slot must be fed by exactly one edge.
  for (int i = 0; i < num_inputs(); ++i) {
    if ((*input_edges)[i] == nullptr) {
      return errors::InvalidArgument("Missing edge input number: ", i);
    }
  }
  return Status::OK();
}
| null | null | 220,199
|
162049066271820143359834296075934716415
| 23
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Returns the dtypes of this node's outputs, one per output slot.
const DataTypeVector& Node::output_types() const {
  return props_->output_types;
}
| null | null | 220,200
|
299029076954539478714424851268081798812
| 3
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Re-derives this node's full-type annotation from its inputs' annotations
// using props_->fwd_type_fn. Bails out (clearing any stale annotation) when
// inputs are incomplete or carry no type information.
void Node::RunForwardTypeInference() {
  VLOG(4) << "Forward type inference: " << props_->node_def.DebugString();

  if (props_->fwd_type_fn == nullptr) {
    return;
  }

  // Gather, per input slot, the producing node and its output index.
  std::vector<Node*> input_nodes(props_->input_types.size(), nullptr);
  std::vector<int> input_idx(props_->input_types.size(), 0);
  for (const auto& edge : in_edges_) {
    if (edge->IsControlEdge()) {
      continue;
    }
    DCHECK(edge->dst_input() < input_nodes.size()) << DebugString();
    int i = edge->dst_input();
    input_nodes.at(i) = edge->src();
    input_idx.at(i) = edge->src_output();
  }

  // Note: technically, we could use a very generic type when some of the inputs
  // are unknown. But there is an expectation that a node will have complete
  // inputs soon, so updating intermediate types is largely unnecessary.

  for (const auto* node : input_nodes) {
    if (node == nullptr) {
      // Incomplete inputs, bail.
      ClearTypeInfo();
      return;
    }
  }

  static FullTypeDef* no_type = new FullTypeDef();

  std::vector<std::reference_wrapper<const FullTypeDef>> input_types;
  for (int i = 0; i < input_nodes.size(); i++) {
    const auto* node = input_nodes[i];
    if (node->def().has_experimental_type()) {
      const auto& node_t = node->def().experimental_type();
      if (node_t.type_id() != TFT_UNSET) {
        int ix = input_idx[i];
        if (ix >= node_t.args_size()) {
          LOG(WARNING) << name() << " has bad type information: input " << i
                       << " should have an output " << ix
                       << " but instead only has " << node_t.args_size()
                       << " outputs: " << node_t.DebugString()
                       << "\nThis indicates either "
                          "a bug in op registration or a corrupted graph.";
          ClearTypeInfo();
          return;
        }
        input_types.emplace_back(node_t.args(ix));
      } else {
        input_types.emplace_back(*no_type);
      }
    } else {
      // Incomplete inputs, bail.
      ClearTypeInfo();
      return;
    }
  }

  // NOTE(review): ValueOrDie crashes if fwd_type_fn returns an error status;
  // confirm callers guarantee inference cannot fail, or handle the error.
  const auto infer_type = props_->fwd_type_fn(input_types);
  const FullTypeDef infer_typedef = infer_type.ValueOrDie();
  if (infer_typedef.type_id() != TFT_UNSET) {
    MaybeCopyOnWrite();
    *(props_->node_def.mutable_experimental_type()) = infer_typedef;
  }
}
| null | null | 220,201
|
87598721652696471911492404165799823185
| 68
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Returns the dtypes of this node's inputs, one per input slot.
const DataTypeVector& Node::input_types() const { return props_->input_types; }
| null | null | 220,202
|
56425990079637547533402077549382279135
| 1
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Constructs an empty graph over op registry `ops`: sets GraphDef version
// bounds, seeds the device-name intern table, and creates the synthetic
// _SOURCE and _SINK nodes joined by a control edge.
Graph::Graph(const OpRegistryInterface* ops)
    : ops_(ops, FunctionDefLibrary()),
      versions_(new VersionDef),
      arena_(8 << 10 /* 8kB */) {
  versions_->set_producer(TF_GRAPH_DEF_VERSION);
  versions_->set_min_consumer(TF_GRAPH_DEF_VERSION_MIN_CONSUMER);

  // Initialize the name interning table for assigned_device_name.
  device_names_.push_back("");
  DCHECK_EQ(0, InternDeviceName(""));

  // Source and sink have no endpoints, just control edges.
  NodeDef def;
  def.set_name("_SOURCE");
  def.set_op("NoOp");
  Status status;
  Node* source = AddNode(def, &status);
  TF_CHECK_OK(status);
  CHECK_EQ(source->id(), kSourceId);

  def.set_name("_SINK");
  Node* sink = AddNode(def, &status);
  TF_CHECK_OK(status);
  CHECK_EQ(sink->id(), kSinkId);

  AddControlEdge(source, sink);
}
| null | null | 220,203
|
98949389981589357985390137414231319668
| 27
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Ensures this node holds a uniquely-owned NodeProperties before mutation.
// NodeProperties may be shared between Nodes; if ours is shared, replace it
// with a private deep copy so edits do not leak into sibling nodes.
// TODO(mdan): As nodes become more dynamic, this may not be worth the cost.
void Node::MaybeCopyOnWrite() {
  if (props_.unique()) {
    return;  // Sole owner already; safe to mutate in place.
  }
  props_ = std::make_shared<NodeProperties>(*props_);
}
| null | null | 220,204
|
69489960475694481786366626648614221285
| 7
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
tensorflow
|
c99d98cd189839dcf51aee94e7437b54b31f8abd
| 0
|
// Serializes all nodes with id >= from_node_id into `graph_def`, along with
// the graph's versions and function library. For each node, data inputs are
// emitted first (in dst-input slot order), followed by control inputs sorted
// by source-node name so serialization is deterministic.
void Graph::ToGraphDefSubRange(GraphDef* graph_def, int from_node_id) const {
  graph_def->Clear();
  *graph_def->mutable_versions() = versions();
  *graph_def->mutable_library() = ops_.ToProto();

  graph_def->mutable_node()->Reserve(std::max(1, num_nodes() - from_node_id));

  std::vector<const Edge*>
      inputs;  // Construct this outside the loop for speed.
  for (auto id = from_node_id; id < num_node_ids(); ++id) {
    const Node* node = FindNodeId(id);
    // Skip deleted ids and the implicit _SOURCE/_SINK nodes (non-ops).
    if (node == nullptr || !node->IsOp()) continue;
    NodeDef* node_def = graph_def->add_node();
    *node_def = node->def();

    // Use the node's assigned device, if any, instead of the device requested
    // in the NodeDef.
    if (!node->assigned_device_name().empty()) {
      node_def->set_device(node->assigned_device_name());
    }

    // Get the inputs for this Node.  We make sure control inputs are
    // after data inputs, as required by GraphDef.
    // The first num_inputs() slots hold data edges keyed by dst_input();
    // control edges are appended past that prefix.
    inputs.clear();
    inputs.resize(node->num_inputs(), nullptr);
    for (const Edge* edge : node->in_edges()) {
      if (edge->IsControlEdge()) {
        inputs.push_back(edge);
      } else {
        DCHECK(edge->dst_input() < inputs.size())
            << "Edge " << edge->DebugString()
            << " is overflowing the expected number of inputs ("
            << node->num_inputs() << ") for node " << node->DebugString();
        // Each data-input slot may be filled at most once.
        CHECK(inputs[edge->dst_input()] == nullptr)
            << "Edge " << edge->src()->name() << "->" << edge->dst()->name()
            << " conflicts with pre-existing input edge "
            << inputs[edge->dst_input()]->src()->name() << "->"
            << inputs[edge->dst_input()]->dst()->name();

        inputs[edge->dst_input()] = edge;
      }
    }
    // Sort the control inputs for more predictable serialization.
    std::sort(inputs.begin() + node->num_inputs(), inputs.end(),
              [](const Edge* a, const Edge* b) -> bool {
                return a->src()->name() < b->src()->name();
              });
    node_def->clear_input();
    node_def->mutable_input()->Reserve(inputs.size());

    for (size_t i = 0; i < inputs.size(); ++i) {
      const Edge* edge = inputs[i];
      if (edge == nullptr) {
        // Slot never filled by an edge: fall back to the originally
        // requested input string, or an empty placeholder.
        if (i < node->requested_inputs().size()) {
          node_def->add_input(node->requested_inputs()[i]);
        } else {
          node_def->add_input("");
        }
      } else {
        const Node* src = edge->src();
        // Edges from non-op nodes (_SOURCE) are not serialized.
        if (!src->IsOp()) continue;
        AddInput(node_def, src->name(), edge->src_output());
      }
    }
  }
}
| null | null | 220,205
|
259255372966478832702081753694858887155
| 66
|
Handle invalid inputs instead of crashing.
PiperOrigin-RevId: 409549744
Change-Id: I7f5935b34b53f7e426a5462fcc027bdbf5dcda24
|
other
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.