| column | type |
|---|---|
| idx | int64 |
| project | string |
| commit_id | string |
| project_url | string |
| commit_url | string |
| commit_message | string |
| target | int64 |
| func | string |
| func_hash | string |
| file_name | string |
| file_hash | string |
| cwe | string |
| cve | string |
| cve_desc | string |
| nvd_url | string |

---

idx: 215,103
project: mongo
commit_id: a5e2f9b0a236462a6d1ca129583c617f111367b4
project_url: https://github.com/mongodb/mongo
commit_url: https://github.com/mongodb/mongo/commit/a5e2f9b0a236462a6d1ca129583c617f111367b4
commit_message: SERVER-59071 Treat '$sample' as unsharded when connecting directly to shards
(cherry picked from commit f3604b901d688c194de5e430c7fbab060c9dc8e0)
target: 1 (vulnerable version, before the fix)
func:

```cpp
createRandomCursorExecutor(const CollectionPtr& coll,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
long long sampleSize,
long long numRecords,
boost::optional<BucketUnpacker> bucketUnpacker) {
OperationContext* opCtx = expCtx->opCtx;
// Verify that we are already under a collection lock. We avoid taking locks ourselves in this
// function because double-locking forces any PlanExecutor we create to adopt a NO_YIELD policy.
invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IS));
static const double kMaxSampleRatioForRandCursor = 0.05;
if (!expCtx->ns.isTimeseriesBucketsCollection()) {
if (sampleSize > numRecords * kMaxSampleRatioForRandCursor || numRecords <= 100) {
return std::pair{nullptr, false};
}
} else {
// Suppose that a time-series bucket collection is observed to contain 200 buckets, and the
// 'gTimeseriesBucketMaxCount' parameter is set to 1000. If all buckets are full, then the
// maximum possible measurement count would be 200 * 1000 = 200,000. While the
// 'SampleFromTimeseriesBucket' plan is more efficient when the sample size is small
// relative to the total number of measurements in the time-series collection, for larger
// sample sizes the top-k sort based sample is faster. Experiments have approximated that
// the tipping point is roughly when the requested sample size is greater than 1% of the
// maximum possible number of measurements in the collection (i.e. numBuckets *
// maxMeasurementsPerBucket).
static const double kCoefficient = 0.01;
if (sampleSize > kCoefficient * numRecords * gTimeseriesBucketMaxCount) {
return std::pair{nullptr, false};
}
}
// Attempt to get a random cursor from the RecordStore.
auto rsRandCursor = coll->getRecordStore()->getRandomCursor(opCtx);
if (!rsRandCursor) {
// The storage engine has no random cursor support.
return std::pair{nullptr, false};
}
// Build a MultiIteratorStage and pass it the random-sampling RecordCursor.
auto ws = std::make_unique<WorkingSet>();
std::unique_ptr<PlanStage> root =
std::make_unique<MultiIteratorStage>(expCtx.get(), ws.get(), coll);
static_cast<MultiIteratorStage*>(root.get())->addIterator(std::move(rsRandCursor));
// If the incoming operation is sharded, use the CSS to infer the filtering metadata for the
// collection, otherwise treat it as unsharded
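// Flaw fixed by this commit: the ownership filter is fetched without first
// checking that the collection is actually sharded. When the $sample
// aggregation reaches a shard directly (not via mongos), getOwnershipFilter()
// can fire an invariant and terminate the server (CWE-617, CVE-2021-32037).
// The target = 0 record below fetches the filter only after
// css->getCollectionDescription(opCtx).isSharded() confirms sharding.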
auto collectionFilter =
CollectionShardingState::get(opCtx, coll->ns())
->getOwnershipFilter(
opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
TrialStage* trialStage = nullptr;
// Because 'numRecords' includes orphan documents, our initial decision to optimize the $sample
// cursor may have been mistaken. For sharded collections, build a TRIAL plan that will switch
// to a collection scan if the ratio of orphaned to owned documents encountered over the first
// 100 works() is such that we would have chosen not to optimize.
static const size_t kMaxPresampleSize = 100;
if (collectionFilter.isSharded() && !expCtx->ns.isTimeseriesBucketsCollection()) {
// The ratio of owned to orphaned documents must be at least equal to the ratio between the
// requested sampleSize and the maximum permitted sampleSize for the original constraints to
// be satisfied. For instance, if there are 200 documents and the sampleSize is 5, then at
// least (5 / (200*0.05)) = (5/10) = 50% of those documents must be owned. If less than 5%
// of the documents in the collection are owned, we default to the backup plan.
const auto minAdvancedToWorkRatio = std::max(
sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor);
// The trial plan is SHARDING_FILTER-MULTI_ITERATOR.
auto randomCursorPlan = std::make_unique<ShardFilterStage>(
expCtx.get(), collectionFilter, ws.get(), std::move(root));
// The backup plan is SHARDING_FILTER-COLLSCAN.
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
collScanPlan = std::make_unique<ShardFilterStage>(
expCtx.get(), collectionFilter, ws.get(), std::move(collScanPlan));
// Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans.
root = std::make_unique<TrialStage>(expCtx.get(),
ws.get(),
std::move(randomCursorPlan),
std::move(collScanPlan),
kMaxPresampleSize,
minAdvancedToWorkRatio);
trialStage = static_cast<TrialStage*>(root.get());
} else if (expCtx->ns.isTimeseriesBucketsCollection()) {
// Use a 'TrialStage' to run a trial between 'SampleFromTimeseriesBucket' and
// 'UnpackTimeseriesBucket' with $sample left in the pipeline in-place. If the buckets are
// not sufficiently full, or the 'SampleFromTimeseriesBucket' plan draws too many
// duplicates, then we will fall back to the 'TrialStage' backup plan. This backup plan uses
// the top-k sort sampling approach.
//
// Suppose the 'gTimeseriesBucketMaxCount' is 1000, but each bucket only contains 500
// documents on average. The observed trial advanced/work ratio approximates the average
// bucket fullness, noted here as "abf". In this example, abf = 500 / 1000 = 0.5.
// Experiments have shown that the optimized 'SampleFromTimeseriesBucket' algorithm performs
// better than backup plan when
//
// sampleSize < 0.02 * abf * numRecords * gTimeseriesBucketMaxCount
//
// This inequality can be rewritten as
//
// abf > sampleSize / (0.02 * numRecords * gTimeseriesBucketMaxCount)
//
// Therefore, if the advanced/work ratio exceeds this threshold, we will use the
// 'SampleFromTimeseriesBucket' plan. Note that as the sample size requested by the user
// becomes larger with respect to the number of buckets, we require a higher advanced/work
// ratio in order to justify using 'SampleFromTimeseriesBucket'.
//
// Additionally, we require the 'TrialStage' to approximate the abf as at least 0.25. When
// buckets are mostly empty, the 'SampleFromTimeseriesBucket' will be inefficient due to a
// lot of sampling "misses".
static const auto kCoefficient = 0.02;
static const auto kMinBucketFullness = 0.25;
const auto minAdvancedToWorkRatio = std::max(
std::min(sampleSize / (kCoefficient * numRecords * gTimeseriesBucketMaxCount), 1.0),
kMinBucketFullness);
auto arhashPlan = std::make_unique<SampleFromTimeseriesBucket>(
expCtx.get(),
ws.get(),
std::move(root),
*bucketUnpacker,
// By using a quantity slightly higher than 'kMaxPresampleSize', we ensure that the
// 'SampleFromTimeseriesBucket' stage won't fail due to too many consecutive sampling
// attempts during the 'TrialStage's trial period.
kMaxPresampleSize + 5,
sampleSize,
gTimeseriesBucketMaxCount);
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
auto topkSortPlan = std::make_unique<UnpackTimeseriesBucket>(
expCtx.get(), ws.get(), std::move(collScanPlan), *bucketUnpacker);
root = std::make_unique<TrialStage>(expCtx.get(),
ws.get(),
std::move(arhashPlan),
std::move(topkSortPlan),
kMaxPresampleSize,
minAdvancedToWorkRatio);
trialStage = static_cast<TrialStage*>(root.get());
}
auto execStatus = plan_executor_factory::make(expCtx,
std::move(ws),
std::move(root),
&coll,
opCtx->inMultiDocumentTransaction()
? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY
: PlanYieldPolicy::YieldPolicy::YIELD_AUTO,
QueryPlannerParams::RETURN_OWNED_DATA);
if (!execStatus.isOK()) {
return execStatus.getStatus();
}
// For sharded collections, the root of the plan tree is a TrialStage that may have chosen
// either a random-sampling cursor trial plan or a COLLSCAN backup plan. We can only optimize
// the $sample aggregation stage if the trial plan was chosen.
return std::pair{std::move(execStatus.getValue()),
!trialStage || !trialStage->pickedBackupPlan()};
}
```

func_hash: 101223003381904306346527857489543984398
file_name: None
cwe: CWE-617
cve: CVE-2021-32037
cve_desc: An authorized user may trigger an invariant which may result in denial of service or server exit if a relevant aggregation request is sent to a shard. Usually, the requests are sent via mongos and special privileges are required in order to know the address of the shards and to log in to the shards of an auth enabled environment.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2021-32037

---

idx: 483,469
project: mongo
commit_id: a5e2f9b0a236462a6d1ca129583c617f111367b4
project_url: https://github.com/mongodb/mongo
commit_url: https://github.com/mongodb/mongo/commit/a5e2f9b0a236462a6d1ca129583c617f111367b4
commit_message: SERVER-59071 Treat '$sample' as unsharded when connecting directly to shards
(cherry picked from commit f3604b901d688c194de5e430c7fbab060c9dc8e0)
target: 0 (patched version, after the fix)
func:

```cpp
createRandomCursorExecutor(const CollectionPtr& coll,
const boost::intrusive_ptr<ExpressionContext>& expCtx,
long long sampleSize,
long long numRecords,
boost::optional<BucketUnpacker> bucketUnpacker) {
OperationContext* opCtx = expCtx->opCtx;
// Verify that we are already under a collection lock. We avoid taking locks ourselves in this
// function because double-locking forces any PlanExecutor we create to adopt a NO_YIELD policy.
invariant(opCtx->lockState()->isCollectionLockedForMode(coll->ns(), MODE_IS));
static const double kMaxSampleRatioForRandCursor = 0.05;
if (!expCtx->ns.isTimeseriesBucketsCollection()) {
if (sampleSize > numRecords * kMaxSampleRatioForRandCursor || numRecords <= 100) {
return std::pair{nullptr, false};
}
} else {
// Suppose that a time-series bucket collection is observed to contain 200 buckets, and the
// 'gTimeseriesBucketMaxCount' parameter is set to 1000. If all buckets are full, then the
// maximum possible measurement count would be 200 * 1000 = 200,000. While the
// 'SampleFromTimeseriesBucket' plan is more efficient when the sample size is small
// relative to the total number of measurements in the time-series collection, for larger
// sample sizes the top-k sort based sample is faster. Experiments have approximated that
// the tipping point is roughly when the requested sample size is greater than 1% of the
// maximum possible number of measurements in the collection (i.e. numBuckets *
// maxMeasurementsPerBucket).
static const double kCoefficient = 0.01;
if (sampleSize > kCoefficient * numRecords * gTimeseriesBucketMaxCount) {
return std::pair{nullptr, false};
}
}
// Attempt to get a random cursor from the RecordStore.
auto rsRandCursor = coll->getRecordStore()->getRandomCursor(opCtx);
if (!rsRandCursor) {
// The storage engine has no random cursor support.
return std::pair{nullptr, false};
}
// Build a MultiIteratorStage and pass it the random-sampling RecordCursor.
auto ws = std::make_unique<WorkingSet>();
std::unique_ptr<PlanStage> root =
std::make_unique<MultiIteratorStage>(expCtx.get(), ws.get(), coll);
static_cast<MultiIteratorStage*>(root.get())->addIterator(std::move(rsRandCursor));
TrialStage* trialStage = nullptr;
// Because 'numRecords' includes orphan documents, our initial decision to optimize the $sample
// cursor may have been mistaken. For sharded collections, build a TRIAL plan that will switch
// to a collection scan if the ratio of orphaned to owned documents encountered over the first
// 100 works() is such that we would have chosen not to optimize.
static const size_t kMaxPresampleSize = 100;
if (auto css = CollectionShardingState::get(opCtx, coll->ns());
css->getCollectionDescription(opCtx).isSharded() &&
!expCtx->ns.isTimeseriesBucketsCollection()) {
// The ratio of owned to orphaned documents must be at least equal to the ratio between the
// requested sampleSize and the maximum permitted sampleSize for the original constraints to
// be satisfied. For instance, if there are 200 documents and the sampleSize is 5, then at
// least (5 / (200*0.05)) = (5/10) = 50% of those documents must be owned. If less than 5%
// of the documents in the collection are owned, we default to the backup plan.
const auto minAdvancedToWorkRatio = std::max(
sampleSize / (numRecords * kMaxSampleRatioForRandCursor), kMaxSampleRatioForRandCursor);
// Since the incoming operation is sharded, use the CSS to infer the filtering metadata for
// the collection. We get the shard ownership filter after checking to see if the collection
// is sharded to avoid an invariant from being fired in this call.
auto collectionFilter = css->getOwnershipFilter(
opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
// The trial plan is SHARDING_FILTER-MULTI_ITERATOR.
auto randomCursorPlan = std::make_unique<ShardFilterStage>(
expCtx.get(), collectionFilter, ws.get(), std::move(root));
// The backup plan is SHARDING_FILTER-COLLSCAN.
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
collScanPlan = std::make_unique<ShardFilterStage>(
expCtx.get(), collectionFilter, ws.get(), std::move(collScanPlan));
// Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans.
root = std::make_unique<TrialStage>(expCtx.get(),
ws.get(),
std::move(randomCursorPlan),
std::move(collScanPlan),
kMaxPresampleSize,
minAdvancedToWorkRatio);
trialStage = static_cast<TrialStage*>(root.get());
} else if (expCtx->ns.isTimeseriesBucketsCollection()) {
// Use a 'TrialStage' to run a trial between 'SampleFromTimeseriesBucket' and
// 'UnpackTimeseriesBucket' with $sample left in the pipeline in-place. If the buckets are
// not sufficiently full, or the 'SampleFromTimeseriesBucket' plan draws too many
// duplicates, then we will fall back to the 'TrialStage' backup plan. This backup plan uses
// the top-k sort sampling approach.
//
// Suppose the 'gTimeseriesBucketMaxCount' is 1000, but each bucket only contains 500
// documents on average. The observed trial advanced/work ratio approximates the average
// bucket fullness, noted here as "abf". In this example, abf = 500 / 1000 = 0.5.
// Experiments have shown that the optimized 'SampleFromTimeseriesBucket' algorithm performs
// better than backup plan when
//
// sampleSize < 0.02 * abf * numRecords * gTimeseriesBucketMaxCount
//
// This inequality can be rewritten as
//
// abf > sampleSize / (0.02 * numRecords * gTimeseriesBucketMaxCount)
//
// Therefore, if the advanced/work ratio exceeds this threshold, we will use the
// 'SampleFromTimeseriesBucket' plan. Note that as the sample size requested by the user
// becomes larger with respect to the number of buckets, we require a higher advanced/work
// ratio in order to justify using 'SampleFromTimeseriesBucket'.
//
// Additionally, we require the 'TrialStage' to approximate the abf as at least 0.25. When
// buckets are mostly empty, the 'SampleFromTimeseriesBucket' will be inefficient due to a
// lot of sampling "misses".
static const auto kCoefficient = 0.02;
static const auto kMinBucketFullness = 0.25;
const auto minAdvancedToWorkRatio = std::max(
std::min(sampleSize / (kCoefficient * numRecords * gTimeseriesBucketMaxCount), 1.0),
kMinBucketFullness);
auto arhashPlan = std::make_unique<SampleFromTimeseriesBucket>(
expCtx.get(),
ws.get(),
std::move(root),
*bucketUnpacker,
// By using a quantity slightly higher than 'kMaxPresampleSize', we ensure that the
// 'SampleFromTimeseriesBucket' stage won't fail due to too many consecutive sampling
// attempts during the 'TrialStage's trial period.
kMaxPresampleSize + 5,
sampleSize,
gTimeseriesBucketMaxCount);
std::unique_ptr<PlanStage> collScanPlan = std::make_unique<CollectionScan>(
expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr);
auto topkSortPlan = std::make_unique<UnpackTimeseriesBucket>(
expCtx.get(), ws.get(), std::move(collScanPlan), *bucketUnpacker);
root = std::make_unique<TrialStage>(expCtx.get(),
ws.get(),
std::move(arhashPlan),
std::move(topkSortPlan),
kMaxPresampleSize,
minAdvancedToWorkRatio);
trialStage = static_cast<TrialStage*>(root.get());
}
auto execStatus = plan_executor_factory::make(expCtx,
std::move(ws),
std::move(root),
&coll,
opCtx->inMultiDocumentTransaction()
? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY
: PlanYieldPolicy::YieldPolicy::YIELD_AUTO,
QueryPlannerParams::RETURN_OWNED_DATA);
if (!execStatus.isOK()) {
return execStatus.getStatus();
}
// For sharded collections, the root of the plan tree is a TrialStage that may have chosen
// either a random-sampling cursor trial plan or a COLLSCAN backup plan. We can only optimize
// the $sample aggregation stage if the trial plan was chosen.
return std::pair{std::move(execStatus.getValue()),
!trialStage || !trialStage->pickedBackupPlan()};
}
```

func_hash: 53510909926140160570175197476495085795
file_name: None
cwe: CWE-617
cve: CVE-2021-32037
cve_desc: An authorized user may trigger an invariant which may result in denial of service or server exit if a relevant aggregation request is sent to a shard. Usually, the requests are sent via mongos and special privileges are required in order to know the address of the shards and to log in to the shards of an auth enabled environment.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2021-32037
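
Condensed from the two mongo records above, the essential change is where the shard ownership filter is fetched; the patched version first asks the CollectionShardingState whether the collection is sharded, so the call that can fire the invariant is never reached when a shard is addressed directly:

```cpp
// target = 1 (before): the filter is fetched unconditionally, and
// getOwnershipFilter() can fire an invariant when a shard is addressed directly.
auto collectionFilter =
    CollectionShardingState::get(opCtx, coll->ns())
        ->getOwnershipFilter(
            opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);

// target = 0 (after): the filter is fetched only once the collection is known
// to be sharded, so this code path can no longer reach the invariant.
if (auto css = CollectionShardingState::get(opCtx, coll->ns());
    css->getCollectionDescription(opCtx).isSharded() &&
    !expCtx->ns.isTimeseriesBucketsCollection()) {
    auto collectionFilter = css->getOwnershipFilter(
        opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup);
    // ... build the SHARDING_FILTER / TRIAL plan as in the records above ...
}
```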

---

idx: 215,142
project: open62541
commit_id: b79db1ac78146fc06b0b8435773d3967de2d659c
project_url: https://github.com/open62541/open62541
commit_url: https://github.com/open62541/open62541/commit/b79db1ac78146fc06b0b8435773d3967de2d659c
commit_message: fix(plugin): Add default limits for chunks and message size
Based on a reported DoS vulnerability reported by Team82 (Claroty Research).
target: 1 (vulnerable version, before the fix)
func:

```c
setup_secureChannel(void) {
TestingPolicy(&dummyPolicy, dummyCertificate, &fCalled, &keySizes);
UA_SecureChannel_init(&testChannel, &UA_ConnectionConfig_default);
UA_SecureChannel_setSecurityPolicy(&testChannel, &dummyPolicy, &dummyCertificate);
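/* Pre-fix: the dummy connection is created with a hard-coded 65535-byte
 * buffer. The patched test below derives the size from
 * UA_ConnectionConfig_default.sendBufferSize instead, keeping the test in
 * step with the connection defaults that now carry the new limits. */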
testingConnection = createDummyConnection(65535, &sentData);
UA_Connection_attachSecureChannel(&testingConnection, &testChannel);
testChannel.connection = &testingConnection;
testChannel.state = UA_SECURECHANNELSTATE_OPEN;
}
```

func_hash: 129373646789395132978577350520275897854
file_name: check_securechannel.c
file_hash: 237191463744761061393465477757063019069
cwe: CWE-703
cve: CVE-2022-25761
cve_desc: The package open62541/open62541 before 1.2.5, from 1.3-rc1 and before 1.3.1 are vulnerable to Denial of Service (DoS) due to a missing limitation on the number of received chunks - per single session or in total for all concurrent sessions. An attacker can exploit this vulnerability by sending an unlimited number of huge chunks (e.g. 2GB each) without sending the Final closing chunk.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-25761

---

idx: 484,063
project: open62541
commit_id: b79db1ac78146fc06b0b8435773d3967de2d659c
project_url: https://github.com/open62541/open62541
commit_url: https://github.com/open62541/open62541/commit/b79db1ac78146fc06b0b8435773d3967de2d659c
commit_message: fix(plugin): Add default limits for chunks and message size
Based on a reported DoS vulnerability reported by Team82 (Claroty Research).
target: 0 (patched version, after the fix)
func:

```c
setup_secureChannel(void) {
TestingPolicy(&dummyPolicy, dummyCertificate, &fCalled, &keySizes);
UA_SecureChannel_init(&testChannel, &UA_ConnectionConfig_default);
UA_SecureChannel_setSecurityPolicy(&testChannel, &dummyPolicy, &dummyCertificate);
testingConnection =
createDummyConnection(UA_ConnectionConfig_default.sendBufferSize, &sentData);
UA_Connection_attachSecureChannel(&testingConnection, &testChannel);
testChannel.connection = &testingConnection;
testChannel.state = UA_SECURECHANNELSTATE_OPEN;
}
```

func_hash: 210099699353094700409954983864057205013
file_name: check_securechannel.c
file_hash: 317936293844889222163489724834611177083
cwe: CWE-703
cve: CVE-2022-25761
cve_desc: The package open62541/open62541 before 1.2.5, from 1.3-rc1 and before 1.3.1 are vulnerable to Denial of Service (DoS) due to a missing limitation on the number of received chunks - per single session or in total for all concurrent sessions. An attacker can exploit this vulnerability by sending an unlimited number of huge chunks (e.g. 2GB each) without sending the Final closing chunk.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2022-25761
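
The CVE text above points at missing caps on the number of buffered chunks and on total message size; the commit adds such limits as defaults. A minimal sketch of that kind of per-channel guard, with hypothetical names (not open62541's actual API):

```c
#include <stddef.h>

/* Hypothetical per-channel budget sketching the default limits the commit
 * introduces; all names here are illustrative, not open62541's API. */
typedef struct {
    size_t chunkCount;     /* chunks buffered so far for the current message */
    size_t messageSize;    /* bytes accumulated so far */
    size_t maxChunkCount;  /* 0 = unlimited */
    size_t maxMessageSize; /* 0 = unlimited */
} ChannelLimits;

/* Returns 1 if the chunk may be buffered, 0 if it must be rejected. */
static int accept_chunk(ChannelLimits *ch, size_t chunkLen) {
    if (ch->maxChunkCount != 0 && ch->chunkCount + 1 > ch->maxChunkCount)
        return 0; /* too many chunks without a Final closing chunk */
    if (ch->maxMessageSize != 0 && ch->messageSize + chunkLen > ch->maxMessageSize)
        return 0; /* reassembled message would exceed the cap */
    ch->chunkCount += 1;
    ch->messageSize += chunkLen;
    return 1;
}
```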

---

idx: 215,188
project: libmobi
commit_id: ab5bf0e37e540eac682a14e628853b918626e72b
project_url: https://github.com/bfabiszewski/libmobi
commit_url: https://github.com/bfabiszewski/libmobi/commit/ab5bf0e37e540eac682a14e628853b918626e72b
commit_message: fix oob write bug inside libmobi
target: 1 (vulnerable version, before the fix)
func:

```c
void mobi_buffer_move(MOBIBuffer *buf, const int offset, const size_t len) {
size_t aoffset = (size_t) abs(offset);
unsigned char *source = buf->data + buf->offset;
if (offset >= 0) {
if (buf->offset + aoffset + len > buf->maxlen) {
debug_print("%s", "End of buffer\n");
buf->error = MOBI_BUFFER_END;
return;
}
source += aoffset;
} else {
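/* Flaw (CWE-787): for a negative offset only the source side is validated
 * (buf->offset >= aoffset); nothing checks that the destination write of
 * 'len' bytes stays within buf->maxlen, so the memmove() below can write
 * past the end of the buffer. The patched version adds the missing
 * 'buf->offset + len > buf->maxlen' rejection. */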
if (buf->offset < aoffset) {
debug_print("%s", "End of buffer\n");
buf->error = MOBI_BUFFER_END;
return;
}
source -= aoffset;
}
memmove(buf->data + buf->offset, source, len);
buf->offset += len;
}
```

func_hash: 63463081562925737105123134404328274929
file_name: None
cwe: CWE-787
cve: CVE-2021-3751
cve_desc: libmobi is vulnerable to Out-of-bounds Write
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2021-3751

---

idx: 484,732
project: libmobi
commit_id: ab5bf0e37e540eac682a14e628853b918626e72b
project_url: https://github.com/bfabiszewski/libmobi
commit_url: https://github.com/bfabiszewski/libmobi/commit/ab5bf0e37e540eac682a14e628853b918626e72b
commit_message: fix oob write bug inside libmobi
target: 0 (patched version, after the fix)
func:

```c
void mobi_buffer_move(MOBIBuffer *buf, const int offset, const size_t len) {
size_t aoffset = (size_t) abs(offset);
unsigned char *source = buf->data + buf->offset;
if (offset >= 0) {
if (buf->offset + aoffset + len > buf->maxlen) {
debug_print("%s", "End of buffer\n");
buf->error = MOBI_BUFFER_END;
return;
}
source += aoffset;
} else {
if ( (buf->offset < aoffset) || (buf->offset + len > buf->maxlen) ) {
debug_print("%s", "Beyond start/end of buffer\n");
buf->error = MOBI_BUFFER_END;
return;
}
source -= aoffset;
}
memmove(buf->data + buf->offset, source, len);
buf->offset += len;
}
```

func_hash: 144002101291977125414572650343381313849
file_name: None
cwe: CWE-787
cve: CVE-2021-3751
cve_desc: libmobi is vulnerable to Out-of-bounds Write
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2021-3751
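
To make the difference between the two bounds checks concrete, here is a small self-contained program (stand-in names and simplified checks, not libmobi's headers) that evaluates both versions of the negative-offset check for a call whose destination write would run past the end of the buffer:

```c
#include <stdio.h>

/* Pre-fix check for a negative offset: validates only the source side. */
static int prefix_ok(size_t off, size_t aoff, size_t len, size_t maxlen) {
    (void)len; (void)maxlen;          /* destination end is never checked */
    return off >= aoff;
}

/* Post-fix check: also rejects destination writes past maxlen. */
static int postfix_ok(size_t off, size_t aoff, size_t len, size_t maxlen) {
    return off >= aoff && off + len <= maxlen;
}

int main(void) {
    /* 16-byte buffer, cursor at 8, copy 12 bytes from relative offset -4:
     * the memmove destination would be bytes 8..19, four past the end. */
    size_t off = 8, aoff = 4, len = 12, maxlen = 16;
    printf("pre-fix accepts:  %d\n", prefix_ok(off, aoff, len, maxlen));  /* 1: OOB write */
    printf("post-fix accepts: %d\n", postfix_ok(off, aoff, len, maxlen)); /* 0: rejected  */
    return 0;
}
```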

---

idx: 215,262
project: qemu
commit_id: e73adfbeec9d4e008630c814759052ed945c3fed
project_url: https://github.com/bonzini/qemu
commit_url: https://git.qemu.org/?p=qemu.git;a=commit;h=e73adfbeec9d4e008630c814759052ed945c3fed
commit_message: cadence_gem: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Alexander Bulekov <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
target: 1 (vulnerable version, before the fix)
func:

```c
static void gem_transmit(CadenceGEMState *s)
{
uint32_t desc[DESC_MAX_NUM_WORDS];
hwaddr packet_desc_addr;
uint8_t *p;
unsigned total_bytes;
int q = 0;
/* Do nothing if transmit is not enabled. */
if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
return;
}
DB_PRINT("\n");
/* The packet we will hand off to QEMU.
* Packets scattered across multiple descriptors are gathered to this
* one contiguous buffer first.
*/
p = s->tx_packet;
total_bytes = 0;
for (q = s->num_priority_queues - 1; q >= 0; q--) {
/* read current descriptor */
packet_desc_addr = gem_get_tx_desc_addr(s, q);
DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
address_space_read(&s->dma_as, packet_desc_addr,
MEMTXATTRS_UNSPECIFIED, desc,
sizeof(uint32_t) * gem_get_desc_len(s, false));
/* Handle all descriptors owned by hardware */
while (tx_desc_get_used(desc) == 0) {
/* Do nothing if transmit is not enabled. */
if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
return;
}
print_gem_tx_desc(desc, q);
/* The real hardware would eat this (and possibly crash).
* For QEMU let's lend a helping hand.
*/
if ((tx_desc_get_buffer(s, desc) == 0) ||
(tx_desc_get_length(desc) == 0)) {
DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n",
packet_desc_addr);
break;
}
if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) -
(p - s->tx_packet)) {
qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \
HWADDR_PRIx " too large: size 0x%x space 0x%zx\n",
packet_desc_addr, tx_desc_get_length(desc),
gem_get_max_buf_len(s, true) - (p - s->tx_packet));
gem_set_isr(s, q, GEM_INT_AMBA_ERR);
break;
}
/* Gather this fragment of the packet from "dma memory" to our
* contig buffer.
*/
address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc),
MEMTXATTRS_UNSPECIFIED,
p, tx_desc_get_length(desc));
p += tx_desc_get_length(desc);
total_bytes += tx_desc_get_length(desc);
/* Last descriptor for this packet; hand the whole thing off */
if (tx_desc_get_last(desc)) {
uint32_t desc_first[DESC_MAX_NUM_WORDS];
hwaddr desc_addr = gem_get_tx_desc_addr(s, q);
/* Modify the 1st descriptor of this packet to be owned by
* the processor.
*/
address_space_read(&s->dma_as, desc_addr,
MEMTXATTRS_UNSPECIFIED, desc_first,
sizeof(desc_first));
tx_desc_set_used(desc_first);
address_space_write(&s->dma_as, desc_addr,
MEMTXATTRS_UNSPECIFIED, desc_first,
sizeof(desc_first));
/* Advance the hardware current descriptor past this packet */
if (tx_desc_get_wrap(desc)) {
s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q);
} else {
s->tx_desc_addr[q] = packet_desc_addr +
4 * gem_get_desc_len(s, false);
}
DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);
s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
gem_set_isr(s, q, GEM_INT_TXCMPL);
/* Handle interrupt consequences */
gem_update_int_status(s);
/* Is checksum offload enabled? */
if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL);
}
/* Update MAC statistics */
gem_transmit_updatestats(s, s->tx_packet, total_bytes);
/* Send the packet somewhere */
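/* Flaw (CVE-2021-3416): in loopback mode the frame is handed straight
 * back to gem_receive(), re-entering this device's own DMA handling with
 * no reentrancy check, so a guest can drive unbounded recursion. The
 * patched record below routes loopback through qemu_receive_packet(),
 * which detects reentrancy and returns early. */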
if (s->phy_loop || (s->regs[GEM_NWCTRL] &
GEM_NWCTRL_LOCALLOOP)) {
gem_receive(qemu_get_queue(s->nic), s->tx_packet,
total_bytes);
} else {
qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet,
total_bytes);
}
/* Prepare for next packet */
p = s->tx_packet;
total_bytes = 0;
}
/* read next descriptor */
if (tx_desc_get_wrap(desc)) {
if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
packet_desc_addr = s->regs[GEM_TBQPH];
packet_desc_addr <<= 32;
} else {
packet_desc_addr = 0;
}
packet_desc_addr |= gem_get_tx_queue_base_addr(s, q);
} else {
packet_desc_addr += 4 * gem_get_desc_len(s, false);
}
DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
address_space_read(&s->dma_as, packet_desc_addr,
MEMTXATTRS_UNSPECIFIED, desc,
sizeof(uint32_t) * gem_get_desc_len(s, false));
}
if (tx_desc_get_used(desc)) {
s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED;
/* IRQ TXUSED is defined only for queue 0 */
if (q == 0) {
gem_set_isr(s, 0, GEM_INT_TXUSED);
}
gem_update_int_status(s);
}
}
}
```

func_hash: 96949803014582607595727068395060090256
file_name: cadence_gem.c
file_hash: 142576312399333534516163795241152257631
cwe: CWE-835
cve: CVE-2021-3416
cve_desc: A potential stack overflow via infinite loop issue was found in various NIC emulators of QEMU in versions up to and including 5.2.0. The issue occurs in loopback mode of a NIC wherein reentrant DMA checks get bypassed. A guest user/process may use this flaw to consume CPU cycles or crash the QEMU process on the host resulting in DoS scenario.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2021-3416

---

idx: 486,796
project: qemu
commit_id: e73adfbeec9d4e008630c814759052ed945c3fed
project_url: https://github.com/bonzini/qemu
commit_url: https://git.qemu.org/?p=qemu.git;a=commit;h=e73adfbeec9d4e008630c814759052ed945c3fed
commit_message: cadence_gem: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Alexander Bulekov <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
target: 0 (patched version, after the fix)
func:

```c
static void gem_transmit(CadenceGEMState *s)
{
uint32_t desc[DESC_MAX_NUM_WORDS];
hwaddr packet_desc_addr;
uint8_t *p;
unsigned total_bytes;
int q = 0;
/* Do nothing if transmit is not enabled. */
if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
return;
}
DB_PRINT("\n");
/* The packet we will hand off to QEMU.
* Packets scattered across multiple descriptors are gathered to this
* one contiguous buffer first.
*/
p = s->tx_packet;
total_bytes = 0;
for (q = s->num_priority_queues - 1; q >= 0; q--) {
/* read current descriptor */
packet_desc_addr = gem_get_tx_desc_addr(s, q);
DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
address_space_read(&s->dma_as, packet_desc_addr,
MEMTXATTRS_UNSPECIFIED, desc,
sizeof(uint32_t) * gem_get_desc_len(s, false));
/* Handle all descriptors owned by hardware */
while (tx_desc_get_used(desc) == 0) {
/* Do nothing if transmit is not enabled. */
if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
return;
}
print_gem_tx_desc(desc, q);
/* The real hardware would eat this (and possibly crash).
* For QEMU let's lend a helping hand.
*/
if ((tx_desc_get_buffer(s, desc) == 0) ||
(tx_desc_get_length(desc) == 0)) {
DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n",
packet_desc_addr);
break;
}
if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) -
(p - s->tx_packet)) {
qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \
HWADDR_PRIx " too large: size 0x%x space 0x%zx\n",
packet_desc_addr, tx_desc_get_length(desc),
gem_get_max_buf_len(s, true) - (p - s->tx_packet));
gem_set_isr(s, q, GEM_INT_AMBA_ERR);
break;
}
/* Gather this fragment of the packet from "dma memory" to our
* contig buffer.
*/
address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc),
MEMTXATTRS_UNSPECIFIED,
p, tx_desc_get_length(desc));
p += tx_desc_get_length(desc);
total_bytes += tx_desc_get_length(desc);
/* Last descriptor for this packet; hand the whole thing off */
if (tx_desc_get_last(desc)) {
uint32_t desc_first[DESC_MAX_NUM_WORDS];
hwaddr desc_addr = gem_get_tx_desc_addr(s, q);
/* Modify the 1st descriptor of this packet to be owned by
* the processor.
*/
address_space_read(&s->dma_as, desc_addr,
MEMTXATTRS_UNSPECIFIED, desc_first,
sizeof(desc_first));
tx_desc_set_used(desc_first);
address_space_write(&s->dma_as, desc_addr,
MEMTXATTRS_UNSPECIFIED, desc_first,
sizeof(desc_first));
/* Advance the hardware current descriptor past this packet */
if (tx_desc_get_wrap(desc)) {
s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q);
} else {
s->tx_desc_addr[q] = packet_desc_addr +
4 * gem_get_desc_len(s, false);
}
DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);
s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
gem_set_isr(s, q, GEM_INT_TXCMPL);
/* Handle interrupt consequences */
gem_update_int_status(s);
/* Is checksum offload enabled? */
if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL);
}
/* Update MAC statistics */
gem_transmit_updatestats(s, s->tx_packet, total_bytes);
/* Send the packet somewhere */
if (s->phy_loop || (s->regs[GEM_NWCTRL] &
GEM_NWCTRL_LOCALLOOP)) {
qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet,
total_bytes);
} else {
qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet,
total_bytes);
}
/* Prepare for next packet */
p = s->tx_packet;
total_bytes = 0;
}
/* read next descriptor */
if (tx_desc_get_wrap(desc)) {
if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
packet_desc_addr = s->regs[GEM_TBQPH];
packet_desc_addr <<= 32;
} else {
packet_desc_addr = 0;
}
packet_desc_addr |= gem_get_tx_queue_base_addr(s, q);
} else {
packet_desc_addr += 4 * gem_get_desc_len(s, false);
}
DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
address_space_read(&s->dma_as, packet_desc_addr,
MEMTXATTRS_UNSPECIFIED, desc,
sizeof(uint32_t) * gem_get_desc_len(s, false));
}
if (tx_desc_get_used(desc)) {
s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED;
/* IRQ TXUSED is defined only for queue 0 */
if (q == 0) {
gem_set_isr(s, 0, GEM_INT_TXUSED);
}
gem_update_int_status(s);
}
}
}
```

func_hash: 333706942896596885696237067919896756767
file_name: cadence_gem.c
file_hash: 35811516595388673046749961470018191104
cwe: CWE-835
cve: CVE-2021-3416
cve_desc: A potential stack overflow via infinite loop issue was found in various NIC emulators of QEMU in versions up to and including 5.2.0. The issue occurs in loopback mode of a NIC wherein reentrant DMA checks get bypassed. A guest user/process may use this flaw to consume CPU cycles or crash the QEMU process on the host resulting in DoS scenario.
nvd_url: https://nvd.nist.gov/vuln/detail/CVE-2021-3416
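
Per the commit message, qemu_receive_packet() differs from the direct gem_receive() call in that it can detect reentrant delivery and return early. A minimal sketch of that guard pattern, with hypothetical names rather than QEMU's actual NetQueue types:

```c
#include <stddef.h>
#include <sys/types.h> /* ssize_t */

/* Illustrative queue with a reentrancy flag; not QEMU's implementation. */
typedef struct {
    int delivering; /* nonzero while a packet delivery is on the stack */
} Queue;

/* Deliver a packet unless we are already inside a delivery on this queue,
 * which is exactly the loopback-DMA reentrancy the fix guards against. */
static ssize_t receive_packet(Queue *q, const unsigned char *buf, size_t len) {
    (void)buf;
    if (q->delivering)
        return 0; /* reentrant call: drop it and return early */
    q->delivering = 1;
    /* ... hand buf/len to the device model; this may loop frames back ... */
    q->delivering = 0;
    return (ssize_t)len;
}
```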

---

idx: 215,264
project: perl5
commit_id: 66bbb51b93253a3f87d11c2695cfb7bdb782184a
project_url: https://github.com/perl/perl5
commit_url: https://github.com/perl/perl5/commit/66bbb51b93253a3f87d11c2695cfb7bdb782184a
commit_message: study_chunk: avoid mutating regexp program within GOSUB
gh16947 and gh17743: studying GOSUB may restudy in an inner call
(via a mix of recursion and enframing) something that an outer call
is in the middle of looking at. Let the outer frame deal with it.
(CVE-2020-12723)
(cherry picked from commit c4033e740bd18d9fbe3456a9db2ec2053cdc5271)
target: 1 (vulnerable version, before the fix)
func:

```c
STATIC SSize_t
S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp,
SSize_t *minlenp, SSize_t *deltap,
regnode *last,
scan_data_t *data,
I32 stopparen,
U32 recursed_depth,
regnode_ssc *and_withp,
U32 flags, U32 depth)
/* scanp: Start here (read-write). */
/* deltap: Write maxlen-minlen here. */
/* last: Stop before this one. */
/* data: string data about the pattern */
/* stopparen: treat close N as END */
/* recursed: which subroutines have we recursed into */
/* and_withp: Valid if flags & SCF_DO_STCLASS_OR */
{
dVAR;
/* There must be at least this number of characters to match */
SSize_t min = 0;
I32 pars = 0, code;
regnode *scan = *scanp, *next;
SSize_t delta = 0;
int is_inf = (flags & SCF_DO_SUBSTR) && (data->flags & SF_IS_INF);
int is_inf_internal = 0; /* The studied chunk is infinite */
I32 is_par = OP(scan) == OPEN ? ARG(scan) : 0;
scan_data_t data_fake;
SV *re_trie_maxbuff = NULL;
regnode *first_non_open = scan;
SSize_t stopmin = SSize_t_MAX;
scan_frame *frame = NULL;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_STUDY_CHUNK;
RExC_study_started= 1;
Zero(&data_fake, 1, scan_data_t);
if ( depth == 0 ) {
while (first_non_open && OP(first_non_open) == OPEN)
first_non_open=regnext(first_non_open);
}
fake_study_recurse:
DEBUG_r(
RExC_study_chunk_recursed_count++;
);
DEBUG_OPTIMISE_MORE_r(
{
Perl_re_indentf( aTHX_ "study_chunk stopparen=%ld recursed_count=%lu depth=%lu recursed_depth=%lu scan=%p last=%p",
depth, (long)stopparen,
(unsigned long)RExC_study_chunk_recursed_count,
(unsigned long)depth, (unsigned long)recursed_depth,
scan,
last);
if (recursed_depth) {
U32 i;
U32 j;
for ( j = 0 ; j < recursed_depth ; j++ ) {
for ( i = 0 ; i < (U32)RExC_total_parens ; i++ ) {
if (
PAREN_TEST(RExC_study_chunk_recursed +
( j * RExC_study_chunk_recursed_bytes), i )
&& (
!j ||
!PAREN_TEST(RExC_study_chunk_recursed +
(( j - 1 ) * RExC_study_chunk_recursed_bytes), i)
)
) {
Perl_re_printf( aTHX_ " %d",(int)i);
break;
}
}
if ( j + 1 < recursed_depth ) {
Perl_re_printf( aTHX_ ",");
}
}
}
Perl_re_printf( aTHX_ "\n");
}
);
while ( scan && OP(scan) != END && scan < last ){
UV min_subtract = 0; /* How many chars to subtract from the minimum
node length to get a real minimum (because
the folded version may be shorter) */
bool unfolded_multi_char = FALSE;
/* Peephole optimizer: */
DEBUG_STUDYDATA("Peep", data, depth, is_inf);
DEBUG_PEEP("Peep", scan, depth, flags);
/* The reason we do this here is that we need to deal with things like
* /(?:f)(?:o)(?:o)/ which cant be dealt with by the normal EXACT
* parsing code, as each (?:..) is handled by a different invocation of
* reg() -- Yves
*/
JOIN_EXACT(scan,&min_subtract, &unfolded_multi_char, 0);
/* Follow the next-chain of the current node and optimize
away all the NOTHINGs from it.
*/
rck_elide_nothing(scan);
/* The principal pseudo-switch. Cannot be a switch, since we
look into several different things. */
if ( OP(scan) == DEFINEP ) {
SSize_t minlen = 0;
SSize_t deltanext = 0;
SSize_t fake_last_close = 0;
I32 f = SCF_IN_DEFINE;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
scan = regnext(scan);
assert( OP(scan) == IFTHEN );
DEBUG_PEEP("expect IFTHEN", scan, depth, flags);
data_fake.last_closep= &fake_last_close;
minlen = *minlenp;
next = regnext(scan);
scan = NEXTOPER(NEXTOPER(scan));
DEBUG_PEEP("scan", scan, depth, flags);
DEBUG_PEEP("next", next, depth, flags);
/* we suppose the run is continuous, last=next...
* NOTE we dont use the return here! */
/* DEFINEP study_chunk() recursion */
(void)study_chunk(pRExC_state, &scan, &minlen,
&deltanext, next, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1);
scan = next;
} else
if (
OP(scan) == BRANCH ||
OP(scan) == BRANCHJ ||
OP(scan) == IFTHEN
) {
next = regnext(scan);
code = OP(scan);
/* The op(next)==code check below is to see if we
* have "BRANCH-BRANCH", "BRANCHJ-BRANCHJ", "IFTHEN-IFTHEN"
* IFTHEN is special as it might not appear in pairs.
* Not sure whether BRANCH-BRANCHJ is possible, regardless
* we dont handle it cleanly. */
if (OP(next) == code || code == IFTHEN) {
/* NOTE - There is similar code to this block below for
* handling TRIE nodes on a re-study. If you change stuff here
* check there too. */
SSize_t max1 = 0, min1 = SSize_t_MAX, num = 0;
regnode_ssc accum;
regnode * const startbranch=scan;
if (flags & SCF_DO_SUBSTR) {
/* Cannot merge strings after this. */
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (flags & SCF_DO_STCLASS)
ssc_init_zero(pRExC_state, &accum);
while (OP(scan) == code) {
SSize_t deltanext, minnext, fake;
I32 f = 0;
regnode_ssc this_class;
DEBUG_PEEP("Branch", scan, depth, flags);
num++;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
next = regnext(scan);
scan = NEXTOPER(scan); /* everything */
if (code != BRANCH) /* everything but BRANCH */
scan = NEXTOPER(scan);
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
data_fake.start_class = &this_class;
f = SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
/* we suppose the run is continuous, last=next...*/
/* recurse study_chunk() for each BRANCH in an alternation */
minnext = study_chunk(pRExC_state, &scan, minlenp,
&deltanext, next, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1);
if (min1 > minnext)
min1 = minnext;
if (deltanext == SSize_t_MAX) {
is_inf = is_inf_internal = 1;
max1 = SSize_t_MAX;
} else if (max1 < minnext + deltanext)
max1 = minnext + deltanext;
scan = next;
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SCF_SEEN_ACCEPT) {
if ( stopmin > minnext)
stopmin = min + min1;
flags &= ~SCF_DO_SUBSTR;
if (data)
data->flags |= SCF_SEEN_ACCEPT;
}
if (data) {
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (flags & SCF_DO_STCLASS)
ssc_or(pRExC_state, &accum, (regnode_charclass*)&this_class);
}
if (code == IFTHEN && num < 2) /* Empty ELSE branch */
min1 = 0;
if (flags & SCF_DO_SUBSTR) {
data->pos_min += min1;
if (data->pos_delta >= SSize_t_MAX - (max1 - min1))
data->pos_delta = SSize_t_MAX;
else
data->pos_delta += max1 - min1;
if (max1 != min1 || is_inf)
data->cur_is_floating = 1;
}
min += min1;
if (delta == SSize_t_MAX
|| SSize_t_MAX - delta - (max1 - min1) < 0)
delta = SSize_t_MAX;
else
delta += max1 - min1;
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass*) &accum);
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (flags & SCF_DO_STCLASS_AND) {
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum);
flags &= ~SCF_DO_STCLASS;
}
else {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&accum, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
}
}
if (PERL_ENABLE_TRIE_OPTIMISATION &&
OP( startbranch ) == BRANCH )
{
/* demq.
Assuming this was/is a branch we are dealing with: 'scan'
now points at the item that follows the branch sequence,
whatever it is. We now start at the beginning of the
sequence and look for subsequences of
BRANCH->EXACT=>x1
BRANCH->EXACT=>x2
tail
which would be constructed from a pattern like
/A|LIST|OF|WORDS/
If we can find such a subsequence we need to turn the first
element into a trie and then add the subsequent branch exact
strings to the trie.
We have two cases
1. patterns where the whole set of branches can be
converted.
2. patterns where only a subset can be converted.
In case 1 we can replace the whole set with a single regop
for the trie. In case 2 we need to keep the start and end
branches so
'BRANCH EXACT; BRANCH EXACT; BRANCH X'
becomes BRANCH TRIE; BRANCH X;
There is an additional case, that being where there is a
common prefix, which gets split out into an EXACT like node
preceding the TRIE node.
If x(1..n)==tail then we can do a simple trie, if not we make
a "jump" trie, such that when we match the appropriate word
we "jump" to the appropriate tail node. Essentially we turn
a nested if into a case structure of sorts.
*/
int made=0;
if (!re_trie_maxbuff) {
re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, 1);
if (!SvIOK(re_trie_maxbuff))
sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT);
}
if ( SvIV(re_trie_maxbuff)>=0 ) {
regnode *cur;
regnode *first = (regnode *)NULL;
regnode *last = (regnode *)NULL;
regnode *tail = scan;
U8 trietype = 0;
U32 count=0;
/* var tail is used because there may be a TAIL
regop in the way. Ie, the exacts will point to the
thing following the TAIL, but the last branch will
point at the TAIL. So we advance tail. If we
have nested (?:) we may have to move through several
tails.
*/
while ( OP( tail ) == TAIL ) {
/* this is the TAIL generated by (?:) */
tail = regnext( tail );
}
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, tail, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "%s %" UVuf ":%s\n",
depth+1,
"Looking for TRIE'able sequences. Tail node is ",
(UV) REGNODE_OFFSET(tail),
SvPV_nolen_const( RExC_mysv )
);
});
/*
Step through the branches
cur represents each branch,
noper is the first thing to be matched as part
of that branch
noper_next is the regnext() of that node.
We normally handle a case like this
/FOO[xyz]|BAR[pqr]/ via a "jump trie" but we also
support building with NOJUMPTRIE, which restricts
the trie logic to structures like /FOO|BAR/.
If noper is a trieable nodetype then the branch is
a possible optimization target. If we are building
under NOJUMPTRIE then we require that noper_next is
the same as scan (our current position in the regex
program).
Once we have two or more consecutive such branches
we can create a trie of the EXACT's contents and
stitch it in place into the program.
If the sequence represents all of the branches in
the alternation we replace the entire thing with a
single TRIE node.
Otherwise when it is a subsequence we need to
stitch it in place and replace only the relevant
branches. This means the first branch has to remain
as it is used by the alternation logic, and its
next pointer, and needs to be repointed at the item
on the branch chain following the last branch we
have optimized away.
This could be either a BRANCH, in which case the
subsequence is internal, or it could be the item
following the branch sequence in which case the
subsequence is at the end (which does not
necessarily mean the first node is the start of the
alternation).
TRIE_TYPE(X) is a define which maps the optype to a
trietype.
optype | trietype
----------------+-----------
NOTHING | NOTHING
EXACT | EXACT
EXACT_ONLY8 | EXACT
EXACTFU | EXACTFU
EXACTFU_ONLY8 | EXACTFU
EXACTFUP | EXACTFU
EXACTFAA | EXACTFAA
EXACTL | EXACTL
EXACTFLU8 | EXACTFLU8
*/
#define TRIE_TYPE(X) ( ( NOTHING == (X) ) \
? NOTHING \
: ( EXACT == (X) || EXACT_ONLY8 == (X) ) \
? EXACT \
: ( EXACTFU == (X) \
|| EXACTFU_ONLY8 == (X) \
|| EXACTFUP == (X) ) \
? EXACTFU \
: ( EXACTFAA == (X) ) \
? EXACTFAA \
: ( EXACTL == (X) ) \
? EXACTL \
: ( EXACTFLU8 == (X) ) \
? EXACTFLU8 \
: 0 )
/* dont use tail as the end marker for this traverse */
for ( cur = startbranch ; cur != scan ; cur = regnext( cur ) ) {
regnode * const noper = NEXTOPER( cur );
U8 noper_type = OP( noper );
U8 noper_trietype = TRIE_TYPE( noper_type );
#if defined(DEBUGGING) || defined(NOJUMPTRIE)
regnode * const noper_next = regnext( noper );
U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0;
U8 noper_next_trietype = (noper_next && noper_next < tail) ? TRIE_TYPE( noper_next_type ) :0;
#endif
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %d:%s (%d)",
depth+1,
REG_NODE_NUM(cur), SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur) );
regprop(RExC_rx, RExC_mysv, noper, NULL, pRExC_state);
Perl_re_printf( aTHX_ " -> %d:%s",
REG_NODE_NUM(noper), SvPV_nolen_const(RExC_mysv));
if ( noper_next ) {
regprop(RExC_rx, RExC_mysv, noper_next, NULL, pRExC_state);
Perl_re_printf( aTHX_ "\t=> %d:%s\t",
REG_NODE_NUM(noper_next), SvPV_nolen_const(RExC_mysv));
}
Perl_re_printf( aTHX_ "(First==%d,Last==%d,Cur==%d,tt==%s,ntt==%s,nntt==%s)\n",
REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur),
PL_reg_name[trietype], PL_reg_name[noper_trietype], PL_reg_name[noper_next_trietype]
);
});
/* Is noper a trieable nodetype that can be merged
* with the current trie (if there is one)? */
if ( noper_trietype
&&
(
( noper_trietype == NOTHING )
|| ( trietype == NOTHING )
|| ( trietype == noper_trietype )
)
#ifdef NOJUMPTRIE
&& noper_next >= tail
#endif
&& count < U16_MAX)
{
/* Handle mergable triable node Either we are
* the first node in a new trieable sequence,
* in which case we do some bookkeeping,
* otherwise we update the end pointer. */
if ( !first ) {
first = cur;
if ( noper_trietype == NOTHING ) {
#if !defined(DEBUGGING) && !defined(NOJUMPTRIE)
regnode * const noper_next = regnext( noper );
U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0;
U8 noper_next_trietype = noper_next_type ? TRIE_TYPE( noper_next_type ) :0;
#endif
if ( noper_next_trietype ) {
trietype = noper_next_trietype;
} else if (noper_next_type) {
/* a NOTHING regop is 1 regop wide.
* We need at least two for a trie
* so we can't merge this in */
first = NULL;
}
} else {
trietype = noper_trietype;
}
} else {
if ( trietype == NOTHING )
trietype = noper_trietype;
last = cur;
}
if (first)
count++;
} /* end handle mergable triable node */
else {
/* handle unmergable node -
* noper may either be a triable node which can
* not be tried together with the current trie,
* or a non triable node */
if ( last ) {
/* If last is set and trietype is not
* NOTHING then we have found at least two
* triable branch sequences in a row of a
* similar trietype so we can turn them
* into a trie. If/when we allow NOTHING to
* start a trie sequence this condition
* will be required, and it isn't expensive
* so we leave it in for now. */
if ( trietype && trietype != NOTHING )
make_trie( pRExC_state,
startbranch, first, cur, tail,
count, trietype, depth+1 );
last = NULL; /* note: we clear/update
first, trietype etc below,
so we dont do it here */
}
if ( noper_trietype
#ifdef NOJUMPTRIE
&& noper_next >= tail
#endif
){
/* noper is triable, so we can start a new
* trie sequence */
count = 1;
first = cur;
trietype = noper_trietype;
} else if (first) {
/* if we already saw a first but the
* current node is not triable then we have
* to reset the first information. */
count = 0;
first = NULL;
trietype = 0;
}
} /* end handle unmergable node */
} /* loop over branches */
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %s (%d) <SCAN FINISHED> ",
depth+1, SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur));
Perl_re_printf( aTHX_ "(First==%d, Last==%d, Cur==%d, tt==%s)\n",
REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur),
PL_reg_name[trietype]
);
});
if ( last && trietype ) {
if ( trietype != NOTHING ) {
/* the last branch of the sequence was part of
* a trie, so we have to construct it here
* outside of the loop */
made= make_trie( pRExC_state, startbranch,
first, scan, tail, count,
trietype, depth+1 );
#ifdef TRIE_STUDY_OPT
if ( ((made == MADE_EXACT_TRIE &&
startbranch == first)
|| ( first_non_open == first )) &&
depth==0 ) {
flags |= SCF_TRIE_RESTUDY;
if ( startbranch == first
&& scan >= tail )
{
RExC_seen &=~REG_TOP_LEVEL_BRANCHES_SEEN;
}
}
#endif
} else {
/* at this point we know whatever we have is a
* NOTHING sequence/branch AND if 'startbranch'
* is 'first' then we can turn the whole thing
* into a NOTHING
*/
if ( startbranch == first ) {
regnode *opt;
/* the entire thing is a NOTHING sequence,
* something like this: (?:|) So we can
* turn it into a plain NOTHING op. */
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %s (%d) <NOTHING BRANCH SEQUENCE>\n",
depth+1,
SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur));
});
OP(startbranch)= NOTHING;
NEXT_OFF(startbranch)= tail - startbranch;
for ( opt= startbranch + 1; opt < tail ; opt++ )
OP(opt)= OPTIMIZED;
}
}
} /* end if ( last) */
} /* TRIE_MAXBUF is non zero */
} /* do trie */
}
else if ( code == BRANCHJ ) { /* single branch is optimized. */
scan = NEXTOPER(NEXTOPER(scan));
} else /* single branch is optimized. */
scan = NEXTOPER(scan);
continue;
} else if (OP(scan) == SUSPEND || OP(scan) == GOSUB) {
I32 paren = 0;
regnode *start = NULL;
regnode *end = NULL;
U32 my_recursed_depth= recursed_depth;
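/* Flaw (CVE-2020-12723): studying a GOSUB from here can, via a mix of
 * recursion and enframing, restudy and mutate part of the regexp program
 * that an outer study_chunk() frame is still in the middle of examining.
 * The fix (c4033e740bd1, cherry-picked by this commit) leaves such nodes
 * for the outer frame to deal with. */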
if (OP(scan) != SUSPEND) { /* GOSUB */
/* Do setup, note this code has side effects beyond
* the rest of this block. Specifically setting
* RExC_recurse[] must happen at least once during
* study_chunk(). */
paren = ARG(scan);
RExC_recurse[ARG2L(scan)] = scan;
start = REGNODE_p(RExC_open_parens[paren]);
end = REGNODE_p(RExC_close_parens[paren]);
/* NOTE we MUST always execute the above code, even
* if we do nothing with a GOSUB */
if (
( flags & SCF_IN_DEFINE )
||
(
(is_inf_internal || is_inf || (data && data->flags & SF_IS_INF))
&&
( (flags & (SCF_DO_STCLASS | SCF_DO_SUBSTR)) == 0 )
)
) {
/* no need to do anything here if we are in a define. */
/* or we are after some kind of infinite construct
* so we can skip recursing into this item.
* Since it is infinite we will not change the maxlen
* or delta, and if we miss something that might raise
* the minlen it will merely pessimise a little.
*
* Iow /(?(DEFINE)(?<foo>foo|food))a+(?&foo)/
* might result in a minlen of 1 and not of 4,
* but this doesn't make us mismatch, just try a bit
* harder than we should.
* */
scan= regnext(scan);
continue;
}
if (
!recursed_depth
||
!PAREN_TEST(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes), paren)
) {
/* it is quite possible that there are more efficient ways
* to do this. We maintain a bitmap per level of recursion
* of which patterns we have entered so we can detect if a
* pattern creates a possible infinite loop. When we
* recurse down a level we copy the previous levels bitmap
* down. When we are at recursion level 0 we zero the top
* level bitmap. It would be nice to implement a different
* more efficient way of doing this. In particular the top
* level bitmap may be unnecessary.
*/
if (!recursed_depth) {
Zero(RExC_study_chunk_recursed, RExC_study_chunk_recursed_bytes, U8);
} else {
Copy(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes),
RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes),
RExC_study_chunk_recursed_bytes, U8);
}
/* we havent recursed into this paren yet, so recurse into it */
DEBUG_STUDYDATA("gosub-set", data, depth, is_inf);
PAREN_SET(RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes), paren);
my_recursed_depth= recursed_depth + 1;
} else {
DEBUG_STUDYDATA("gosub-inf", data, depth, is_inf);
/* some form of infinite recursion, assume infinite length
* */
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1;
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_anything(data->start_class);
flags &= ~SCF_DO_STCLASS;
start= NULL; /* reset start so we dont recurse later on. */
}
} else {
paren = stopparen;
start = scan + 2;
end = regnext(scan);
}
if (start) {
scan_frame *newframe;
assert(end);
if (!RExC_frame_last) {
Newxz(newframe, 1, scan_frame);
SAVEDESTRUCTOR_X(S_unwind_scan_frames, newframe);
RExC_frame_head= newframe;
RExC_frame_count++;
} else if (!RExC_frame_last->next_frame) {
Newxz(newframe, 1, scan_frame);
RExC_frame_last->next_frame= newframe;
newframe->prev_frame= RExC_frame_last;
RExC_frame_count++;
} else {
newframe= RExC_frame_last->next_frame;
}
RExC_frame_last= newframe;
newframe->next_regnode = regnext(scan);
newframe->last_regnode = last;
newframe->stopparen = stopparen;
newframe->prev_recursed_depth = recursed_depth;
newframe->this_prev_frame= frame;
DEBUG_STUDYDATA("frame-new", data, depth, is_inf);
DEBUG_PEEP("fnew", scan, depth, flags);
frame = newframe;
scan = start;
stopparen = paren;
last = end;
depth = depth + 1;
recursed_depth= my_recursed_depth;
continue;
}
}
else if ( OP(scan) == EXACT
|| OP(scan) == EXACT_ONLY8
|| OP(scan) == EXACTL)
{
SSize_t l = STR_LEN(scan);
UV uc;
assert(l);
if (UTF) {
const U8 * const s = (U8*)STRING(scan);
uc = utf8_to_uvchr_buf(s, s + l, NULL);
l = utf8_length(s, s + l);
} else {
uc = *((U8*)STRING(scan));
}
min += l;
if (flags & SCF_DO_SUBSTR) { /* Update longest substr. */
/* The code below prefers earlier match for fixed
offset, later match for variable offset. */
if (data->last_end == -1) { /* Update the start info. */
data->last_start_min = data->pos_min;
data->last_start_max = is_inf
? SSize_t_MAX : data->pos_min + data->pos_delta;
}
sv_catpvn(data->last_found, STRING(scan), STR_LEN(scan));
if (UTF)
SvUTF8_on(data->last_found);
{
SV * const sv = data->last_found;
MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg && mg->mg_len >= 0)
mg->mg_len += utf8_length((U8*)STRING(scan),
(U8*)STRING(scan)+STR_LEN(scan));
}
data->last_end = data->pos_min + l;
data->pos_min += l; /* As in the first entry. */
data->flags &= ~SF_BEFORE_EOL;
}
/* ANDing the code point leaves at most it, and not in locale, and
* can't match null string */
if (flags & SCF_DO_STCLASS_AND) {
ssc_cp_and(data->start_class, uc);
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
ssc_clear_locale(data->start_class);
}
else if (flags & SCF_DO_STCLASS_OR) {
ssc_add_cp(data->start_class, uc);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
}
else if (PL_regkind[OP(scan)] == EXACT) {
/* But OP != EXACT!, so is EXACTFish */
SSize_t l = STR_LEN(scan);
const U8 * s = (U8*)STRING(scan);
/* Search for fixed substrings supports EXACT only. */
if (flags & SCF_DO_SUBSTR) {
assert(data);
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (UTF) {
l = utf8_length(s, s + l);
}
if (unfolded_multi_char) {
RExC_seen |= REG_UNFOLDED_MULTI_SEEN;
}
min += l - min_subtract;
assert (min >= 0);
delta += min_subtract;
if (flags & SCF_DO_SUBSTR) {
data->pos_min += l - min_subtract;
if (data->pos_min < 0) {
data->pos_min = 0;
}
data->pos_delta += min_subtract;
if (min_subtract) {
data->cur_is_floating = 1; /* float */
}
}
if (flags & SCF_DO_STCLASS) {
SV* EXACTF_invlist = _make_exactf_invlist(pRExC_state, scan);
assert(EXACTF_invlist);
if (flags & SCF_DO_STCLASS_AND) {
if (OP(scan) != EXACTFL)
ssc_clear_locale(data->start_class);
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
ANYOF_POSIXL_ZERO(data->start_class);
ssc_intersection(data->start_class, EXACTF_invlist, FALSE);
}
else { /* SCF_DO_STCLASS_OR */
ssc_union(data->start_class, EXACTF_invlist, FALSE);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
SvREFCNT_dec(EXACTF_invlist);
}
}
else if (REGNODE_VARIES(OP(scan))) {
SSize_t mincount, maxcount, minnext, deltanext, pos_before = 0;
I32 fl = 0, f = flags;
regnode * const oscan = scan;
regnode_ssc this_class;
regnode_ssc *oclass = NULL;
I32 next_is_eval = 0;
switch (PL_regkind[OP(scan)]) {
case WHILEM: /* End of (?:...)* . */
scan = NEXTOPER(scan);
goto finish;
case PLUS:
if (flags & (SCF_DO_SUBSTR | SCF_DO_STCLASS)) {
next = NEXTOPER(scan);
if ( OP(next) == EXACT
|| OP(next) == EXACT_ONLY8
|| OP(next) == EXACTL
|| (flags & SCF_DO_STCLASS))
{
mincount = 1;
maxcount = REG_INFTY;
next = regnext(scan);
scan = NEXTOPER(scan);
goto do_curly;
}
}
if (flags & SCF_DO_SUBSTR)
data->pos_min++;
min++;
/* FALLTHROUGH */
case STAR:
next = NEXTOPER(scan);
/* This temporary node can now be turned into EXACTFU, and
* must, as regexec.c doesn't handle it */
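                    /* (Note: in this pre-fix version the rewrite below runs
                     * unconditionally, even when this node is being re-studied
                     * via a GOSUB; the patched version further down gates it
                     * on mutate_ok.) */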
if (OP(next) == EXACTFU_S_EDGE) {
OP(next) = EXACTFU;
}
if ( STR_LEN(next) == 1
&& isALPHA_A(* STRING(next))
&& ( OP(next) == EXACTFAA
|| ( OP(next) == EXACTFU
&& ! HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(* STRING(next)))))
{
/* These differ in just one bit */
U8 mask = ~ ('A' ^ 'a');
assert(isALPHA_A(* STRING(next)));
/* Then replace it by an ANYOFM node, with
* the mask set to the complement of the
* bit that differs between upper and lower
* case, and the lowest code point of the
* pair (which the '&' forces) */
OP(next) = ANYOFM;
ARG_SET(next, *STRING(next) & mask);
FLAGS(next) = mask;
}
if (flags & SCF_DO_STCLASS) {
mincount = 0;
maxcount = REG_INFTY;
next = regnext(scan);
scan = NEXTOPER(scan);
goto do_curly;
}
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
/* Cannot extend fixed substrings */
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
scan = regnext(scan);
goto optimize_curly_tail;
case CURLY:
if (stopparen>0 && (OP(scan)==CURLYN || OP(scan)==CURLYM)
&& (scan->flags == stopparen))
{
mincount = 1;
maxcount = 1;
} else {
mincount = ARG1(scan);
maxcount = ARG2(scan);
}
next = regnext(scan);
if (OP(scan) == CURLYX) {
I32 lp = (data ? *(data->last_closep) : 0);
scan->flags = ((lp <= (I32)U8_MAX) ? (U8)lp : U8_MAX);
}
scan = NEXTOPER(scan) + EXTRA_STEP_2ARGS;
next_is_eval = (OP(scan) == EVAL);
do_curly:
if (flags & SCF_DO_SUBSTR) {
if (mincount == 0)
scan_commit(pRExC_state, data, minlenp, is_inf);
/* Cannot extend fixed substrings */
pos_before = data->pos_min;
}
if (data) {
fl = data->flags;
data->flags &= ~(SF_HAS_PAR|SF_IN_PAR|SF_HAS_EVAL);
if (is_inf)
data->flags |= SF_IS_INF;
}
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
oclass = data->start_class;
data->start_class = &this_class;
f |= SCF_DO_STCLASS_AND;
f &= ~SCF_DO_STCLASS_OR;
}
/* Exclude from super-linear cache processing any {n,m}
regops for which the combination of input pos and regex
pos is not enough information to determine if a match
will be possible.
For example, in the regex /foo(bar\s*){4,8}baz/ with the
regex pos at the \s*, the prospects for a match depend not
only on the input position but also on how many (bar\s*)
repeats into the {4,8} we are. */
if ((mincount > 1) || (maxcount > 1 && maxcount != REG_INFTY))
f &= ~SCF_WHILEM_VISITED_POS;
/* This will finish on WHILEM, setting scan, or on NULL: */
/* recurse study_chunk() on loop bodies */
minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext,
last, data, stopparen, recursed_depth, NULL,
(mincount == 0
? (f & ~SCF_DO_SUBSTR)
: f)
,depth+1);
if (flags & SCF_DO_STCLASS)
data->start_class = oclass;
if (mincount == 0 || minnext == 0) {
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
}
else if (flags & SCF_DO_STCLASS_AND) {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&this_class, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
ANYOF_FLAGS(data->start_class)
|= SSC_MATCHES_EMPTY_STRING;
}
} else { /* Non-zero len */
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
}
else if (flags & SCF_DO_STCLASS_AND)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
flags &= ~SCF_DO_STCLASS;
}
if (!scan) /* It was not CURLYX, but CURLY. */
scan = next;
if (((flags & (SCF_TRIE_DOING_RESTUDY|SCF_DO_SUBSTR))==SCF_DO_SUBSTR)
/* ? quantifier ok, except for (?{ ... }) */
&& (next_is_eval || !(mincount == 0 && maxcount == 1))
&& (minnext == 0) && (deltanext == 0)
&& data && !(data->flags & (SF_HAS_PAR|SF_IN_PAR))
&& maxcount <= REG_INFTY/3) /* Complement check for big
count */
{
_WARN_HELPER(RExC_precomp_end, packWARN(WARN_REGEXP),
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP),
"Quantifier unexpected on zero-length expression "
"in regex m/%" UTF8f "/",
UTF8fARG(UTF, RExC_precomp_end - RExC_precomp,
RExC_precomp)));
}
if ( ( minnext > 0 && mincount >= SSize_t_MAX / minnext )
|| min >= SSize_t_MAX - minnext * mincount )
{
FAIL("Regexp out of space");
}
min += minnext * mincount;
is_inf_internal |= deltanext == SSize_t_MAX
|| (maxcount == REG_INFTY && minnext + deltanext > 0);
is_inf |= is_inf_internal;
if (is_inf) {
delta = SSize_t_MAX;
} else {
delta += (minnext + deltanext) * maxcount
- minnext * mincount;
}
/* Try powerful optimization CURLYX => CURLYN. */
if ( OP(oscan) == CURLYX && data
&& data->flags & SF_IN_PAR
&& !(data->flags & SF_HAS_EVAL)
&& !deltanext && minnext == 1 ) {
/* Try to optimize to CURLYN. */
regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS;
regnode * const nxt1 = nxt;
#ifdef DEBUGGING
regnode *nxt2;
#endif
/* Skip open. */
nxt = regnext(nxt);
if (!REGNODE_SIMPLE(OP(nxt))
&& !(PL_regkind[OP(nxt)] == EXACT
&& STR_LEN(nxt) == 1))
goto nogo;
#ifdef DEBUGGING
nxt2 = nxt;
#endif
nxt = regnext(nxt);
if (OP(nxt) != CLOSE)
goto nogo;
if (RExC_open_parens) {
/*open->CURLYM*/
RExC_open_parens[ARG(nxt1)] = REGNODE_OFFSET(oscan);
/*close->while*/
RExC_close_parens[ARG(nxt1)] = REGNODE_OFFSET(nxt) + 2;
}
/* Now we know that nxt2 is the only contents: */
oscan->flags = (U8)ARG(nxt);
OP(oscan) = CURLYN;
OP(nxt1) = NOTHING; /* was OPEN. */
#ifdef DEBUGGING
OP(nxt1 + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt1+ 1) = 0; /* just for consistency. */
NEXT_OFF(nxt2) = 0; /* just for consistency with CURLY. */
OP(nxt) = OPTIMIZED; /* was CLOSE. */
OP(nxt + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt+ 1) = 0; /* just for consistency. */
#endif
}
nogo:
/* Try optimization CURLYX => CURLYM. */
if ( OP(oscan) == CURLYX && data
&& !(data->flags & SF_HAS_PAR)
&& !(data->flags & SF_HAS_EVAL)
&& !deltanext /* atom is fixed width */
&& minnext != 0 /* CURLYM can't handle zero width */
/* Nor characters whose fold at run-time may be
* multi-character */
&& ! (RExC_seen & REG_UNFOLDED_MULTI_SEEN)
) {
/* XXXX How to optimize if data == 0? */
/* Optimize to a simpler form. */
regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN */
regnode *nxt2;
OP(oscan) = CURLYM;
while ( (nxt2 = regnext(nxt)) /* skip over embedded stuff*/
&& (OP(nxt2) != WHILEM))
nxt = nxt2;
OP(nxt2) = SUCCEED; /* Was WHILEM */
/* Need to optimize away parens. */
if ((data->flags & SF_IN_PAR) && OP(nxt) == CLOSE) {
/* Set the parenth number. */
regnode *nxt1 = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN*/
oscan->flags = (U8)ARG(nxt);
if (RExC_open_parens) {
/*open->CURLYM*/
RExC_open_parens[ARG(nxt1)] = REGNODE_OFFSET(oscan);
/*close->NOTHING*/
RExC_close_parens[ARG(nxt1)] = REGNODE_OFFSET(nxt2)
+ 1;
}
OP(nxt1) = OPTIMIZED; /* was OPEN. */
OP(nxt) = OPTIMIZED; /* was CLOSE. */
#ifdef DEBUGGING
OP(nxt1 + 1) = OPTIMIZED; /* was count. */
OP(nxt + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt1 + 1) = 0; /* just for consistency. */
NEXT_OFF(nxt + 1) = 0; /* just for consistency. */
#endif
#if 0
while ( nxt1 && (OP(nxt1) != WHILEM)) {
regnode *nnxt = regnext(nxt1);
if (nnxt == nxt) {
if (reg_off_by_arg[OP(nxt1)])
ARG_SET(nxt1, nxt2 - nxt1);
else if (nxt2 - nxt1 < U16_MAX)
NEXT_OFF(nxt1) = nxt2 - nxt1;
else
OP(nxt) = NOTHING; /* Cannot beautify */
}
nxt1 = nnxt;
}
#endif
/* Optimize again: */
/* recurse study_chunk() on optimised CURLYX => CURLYM */
study_chunk(pRExC_state, &nxt1, minlenp, &deltanext, nxt,
NULL, stopparen, recursed_depth, NULL, 0,
depth+1);
}
else
oscan->flags = 0;
}
else if ((OP(oscan) == CURLYX)
&& (flags & SCF_WHILEM_VISITED_POS)
/* See the comment on a similar expression above.
However, this time it's not a subexpression
we care about, but the expression itself. */
&& (maxcount == REG_INFTY)
&& data) {
/* This stays as CURLYX, we can put the count/of pair. */
/* Find WHILEM (as in regexec.c) */
regnode *nxt = oscan + NEXT_OFF(oscan);
if (OP(PREVOPER(nxt)) == NOTHING) /* LONGJMP */
nxt += ARG(nxt);
nxt = PREVOPER(nxt);
if (nxt->flags & 0xf) {
/* we've already set whilem count on this node */
} else if (++data->whilem_c < 16) {
assert(data->whilem_c <= RExC_whilem_seen);
nxt->flags = (U8)(data->whilem_c
| (RExC_whilem_seen << 4)); /* On WHILEM */
}
}
if (data && fl & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (flags & SCF_DO_SUBSTR) {
SV *last_str = NULL;
STRLEN last_chrs = 0;
int counted = mincount != 0;
if (data->last_end > 0 && mincount != 0) { /* Ends with a
string. */
SSize_t b = pos_before >= data->last_start_min
? pos_before : data->last_start_min;
STRLEN l;
const char * const s = SvPV_const(data->last_found, l);
SSize_t old = b - data->last_start_min;
assert(old >= 0);
if (UTF)
old = utf8_hop_forward((U8*)s, old,
(U8 *) SvEND(data->last_found))
- (U8*)s;
l -= old;
/* Get the added string: */
last_str = newSVpvn_utf8(s + old, l, UTF);
last_chrs = UTF ? utf8_length((U8*)(s + old),
(U8*)(s + old + l)) : l;
if (deltanext == 0 && pos_before == b) {
/* What was added is a constant string */
if (mincount > 1) {
SvGROW(last_str, (mincount * l) + 1);
repeatcpy(SvPVX(last_str) + l,
SvPVX_const(last_str), l,
mincount - 1);
SvCUR_set(last_str, SvCUR(last_str) * mincount);
/* Add additional parts. */
SvCUR_set(data->last_found,
SvCUR(data->last_found) - l);
sv_catsv(data->last_found, last_str);
{
SV * sv = data->last_found;
MAGIC *mg =
SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg && mg->mg_len >= 0)
mg->mg_len += last_chrs * (mincount-1);
}
last_chrs *= mincount;
data->last_end += l * (mincount - 1);
}
} else {
/* start offset must point into the last copy */
data->last_start_min += minnext * (mincount - 1);
data->last_start_max =
is_inf
? SSize_t_MAX
: data->last_start_max +
(maxcount - 1) * (minnext + data->pos_delta);
}
}
/* It is counted once already... */
data->pos_min += minnext * (mincount - counted);
#if 0
Perl_re_printf( aTHX_ "counted=%" UVuf " deltanext=%" UVuf
" SSize_t_MAX=%" UVuf " minnext=%" UVuf
" maxcount=%" UVuf " mincount=%" UVuf "\n",
(UV)counted, (UV)deltanext, (UV)SSize_t_MAX, (UV)minnext, (UV)maxcount,
(UV)mincount);
if (deltanext != SSize_t_MAX)
Perl_re_printf( aTHX_ "LHS=%" UVuf " RHS=%" UVuf "\n",
(UV)(-counted * deltanext + (minnext + deltanext) * maxcount
- minnext * mincount), (UV)(SSize_t_MAX - data->pos_delta));
#endif
if (deltanext == SSize_t_MAX
|| -counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount >= SSize_t_MAX - data->pos_delta)
data->pos_delta = SSize_t_MAX;
else
data->pos_delta += - counted * deltanext +
(minnext + deltanext) * maxcount - minnext * mincount;
if (mincount != maxcount) {
/* Cannot extend fixed substrings found inside
the group. */
scan_commit(pRExC_state, data, minlenp, is_inf);
if (mincount && last_str) {
SV * const sv = data->last_found;
MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg)
mg->mg_len = -1;
sv_setsv(sv, last_str);
data->last_end = data->pos_min;
data->last_start_min = data->pos_min - last_chrs;
data->last_start_max = is_inf
? SSize_t_MAX
: data->pos_min + data->pos_delta - last_chrs;
}
data->cur_is_floating = 1; /* float */
}
SvREFCNT_dec(last_str);
}
if (data && (fl & SF_HAS_EVAL))
data->flags |= SF_HAS_EVAL;
optimize_curly_tail:
rck_elide_nothing(oscan);
continue;
default:
#ifdef DEBUGGING
Perl_croak(aTHX_ "panic: unexpected varying REx opcode %d",
OP(scan));
#endif
case REF:
case CLUMP:
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) {
if (OP(scan) == CLUMP) {
/* Actually is any start char, but very few code points
* aren't start characters */
ssc_match_all_cp(data->start_class);
}
else {
ssc_anything(data->start_class);
}
}
flags &= ~SCF_DO_STCLASS;
break;
}
}
else if (OP(scan) == LNBREAK) {
if (flags & SCF_DO_STCLASS) {
if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class,
PL_XPosix_ptrs[_CC_VERTSPACE], FALSE);
ssc_clear_locale(data->start_class);
ANYOF_FLAGS(data->start_class)
&= ~SSC_MATCHES_EMPTY_STRING;
}
else if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class,
PL_XPosix_ptrs[_CC_VERTSPACE],
FALSE);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg for
* 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class)
&= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
}
min++;
if (delta != SSize_t_MAX)
delta++; /* Because of the 2 char string cr-lf */
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min += 1;
if (data->pos_delta != SSize_t_MAX) {
data->pos_delta += 1;
}
data->cur_is_floating = 1; /* float */
}
}
else if (REGNODE_SIMPLE(OP(scan))) {
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min++;
}
min++;
if (flags & SCF_DO_STCLASS) {
bool invert = 0;
SV* my_invlist = NULL;
U8 namedclass;
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
/* Some of the logic below assumes that switching
locale on will only add false positives. */
switch (OP(scan)) {
default:
#ifdef DEBUGGING
Perl_croak(aTHX_ "panic: unexpected simple REx opcode %d",
OP(scan));
#endif
case SANY:
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_match_all_cp(data->start_class);
break;
case REG_ANY:
{
SV* REG_ANY_invlist = _new_invlist(2);
REG_ANY_invlist = add_cp_to_invlist(REG_ANY_invlist,
'\n');
if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class,
REG_ANY_invlist,
TRUE /* TRUE => invert, hence all but \n
*/
);
}
else if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class,
REG_ANY_invlist,
TRUE /* TRUE => invert */
);
ssc_clear_locale(data->start_class);
}
SvREFCNT_dec_NN(REG_ANY_invlist);
}
break;
case ANYOFD:
case ANYOFL:
case ANYOFPOSIXL:
case ANYOFH:
case ANYOF:
if (flags & SCF_DO_STCLASS_AND)
ssc_and(pRExC_state, data->start_class,
(regnode_charclass *) scan);
else
ssc_or(pRExC_state, data->start_class,
(regnode_charclass *) scan);
break;
case NANYOFM:
case ANYOFM:
{
SV* cp_list = get_ANYOFM_contents(scan);
if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class, cp_list, invert);
}
else if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class, cp_list, invert);
}
SvREFCNT_dec_NN(cp_list);
break;
}
case NPOSIXL:
invert = 1;
/* FALLTHROUGH */
case POSIXL:
namedclass = classnum_to_namedclass(FLAGS(scan)) + invert;
if (flags & SCF_DO_STCLASS_AND) {
bool was_there = cBOOL(
ANYOF_POSIXL_TEST(data->start_class,
namedclass));
ANYOF_POSIXL_ZERO(data->start_class);
if (was_there) { /* Do an AND */
ANYOF_POSIXL_SET(data->start_class, namedclass);
}
/* No individual code points can now match */
data->start_class->invlist
= sv_2mortal(_new_invlist(0));
}
else {
int complement = namedclass + ((invert) ? -1 : 1);
assert(flags & SCF_DO_STCLASS_OR);
/* If the complement of this class was already there,
* the result is that they match all code points,
* (\d + \D == everything). Remove the classes from
* future consideration. Locale is not relevant in
* this case */
if (ANYOF_POSIXL_TEST(data->start_class, complement)) {
ssc_match_all_cp(data->start_class);
ANYOF_POSIXL_CLEAR(data->start_class, namedclass);
ANYOF_POSIXL_CLEAR(data->start_class, complement);
}
else { /* The usual case; just add this class to the
existing set */
ANYOF_POSIXL_SET(data->start_class, namedclass);
}
}
break;
case NPOSIXA: /* For these, we always know the exact set of
what's matched */
invert = 1;
/* FALLTHROUGH */
case POSIXA:
my_invlist = invlist_clone(PL_Posix_ptrs[FLAGS(scan)], NULL);
goto join_posix_and_ascii;
case NPOSIXD:
case NPOSIXU:
invert = 1;
/* FALLTHROUGH */
case POSIXD:
case POSIXU:
my_invlist = invlist_clone(PL_XPosix_ptrs[FLAGS(scan)], NULL);
/* NPOSIXD matches all upper Latin1 code points unless the
* target string being matched is UTF-8, which is
* unknowable until match time. Since we are going to
* invert, we want to get rid of all of them so that the
* inversion will match all */
if (OP(scan) == NPOSIXD) {
_invlist_subtract(my_invlist, PL_UpperLatin1,
&my_invlist);
}
join_posix_and_ascii:
if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class, my_invlist, invert);
ssc_clear_locale(data->start_class);
}
else {
assert(flags & SCF_DO_STCLASS_OR);
ssc_union(data->start_class, my_invlist, invert);
}
SvREFCNT_dec(my_invlist);
}
if (flags & SCF_DO_STCLASS_OR)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (PL_regkind[OP(scan)] == EOL && flags & SCF_DO_SUBSTR) {
data->flags |= (OP(scan) == MEOL
? SF_BEFORE_MEOL
: SF_BEFORE_SEOL);
scan_commit(pRExC_state, data, minlenp, is_inf);
}
else if ( PL_regkind[OP(scan)] == BRANCHJ
/* Lookbehind, or need to calculate parens/evals/stclass: */
&& (scan->flags || data || (flags & SCF_DO_STCLASS))
&& (OP(scan) == IFMATCH || OP(scan) == UNLESSM))
{
if ( !PERL_ENABLE_POSITIVE_ASSERTION_STUDY
|| OP(scan) == UNLESSM )
{
/* Negative Lookahead/lookbehind
In this case we can't do fixed string optimisation.
*/
SSize_t deltanext, minnext, fake = 0;
regnode *nscan;
regnode_ssc intrnl;
int f = 0;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
if ( flags & SCF_DO_STCLASS && !scan->flags
&& OP(scan) == IFMATCH ) { /* Lookahead */
ssc_init(pRExC_state, &intrnl);
data_fake.start_class = &intrnl;
f |= SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
next = regnext(scan);
nscan = NEXTOPER(NEXTOPER(scan));
/* recurse study_chunk() for lookahead body */
minnext = study_chunk(pRExC_state, &nscan, minlenp, &deltanext,
last, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1);
if (scan->flags) {
if ( deltanext < 0
|| deltanext > (I32) U8_MAX
|| minnext > (I32)U8_MAX
|| minnext + deltanext > (I32)U8_MAX)
{
FAIL2("Lookbehind longer than %" UVuf " not implemented",
(UV)U8_MAX);
}
/* The 'next_off' field has been repurposed to count the
* additional starting positions to try beyond the initial
* one. (This leaves it at 0 for non-variable length
* matches to avoid breakage for those not using this
* extension) */
if (deltanext) {
scan->next_off = deltanext;
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__VLB,
"Variable length lookbehind is experimental");
}
scan->flags = (U8)minnext + deltanext;
}
if (data) {
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (f & SCF_DO_STCLASS_AND) {
if (flags & SCF_DO_STCLASS_OR) {
/* OR before, AND after: ideally we would recurse with
* data_fake to get the AND applied by study of the
* remainder of the pattern, and then derecurse;
* *** HACK *** for now just treat as "no information".
* See [perl #56690].
*/
ssc_init(pRExC_state, data->start_class);
} else {
/* AND before and after: combine and continue. These
* assertions are zero-length, so can match an EMPTY
* string */
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl);
ANYOF_FLAGS(data->start_class)
|= SSC_MATCHES_EMPTY_STRING;
}
}
}
#if PERL_ENABLE_POSITIVE_ASSERTION_STUDY
else {
/* Positive Lookahead/lookbehind
In this case we can do fixed string optimisation,
but we must be careful about it. Note in the case of
lookbehind the positions will be offset by the minimum
length of the pattern, something we won't know about
until after the recurse.
*/
SSize_t deltanext, fake = 0;
regnode *nscan;
regnode_ssc intrnl;
int f = 0;
/* We use SAVEFREEPV so that when the full compile
is finished perl will clean up the allocated
minlens when it's all done. This way we don't
have to worry about freeing them when we know
they won't be used, which would be a pain.
*/
SSize_t *minnextp;
Newx( minnextp, 1, SSize_t );
SAVEFREEPV(minnextp);
if (data) {
StructCopy(data, &data_fake, scan_data_t);
if ((flags & SCF_DO_SUBSTR) && data->last_found) {
f |= SCF_DO_SUBSTR;
if (scan->flags)
scan_commit(pRExC_state, &data_fake, minlenp, is_inf);
data_fake.last_found=newSVsv(data->last_found);
}
}
else
data_fake.last_closep = &fake;
data_fake.flags = 0;
data_fake.substrs[0].flags = 0;
data_fake.substrs[1].flags = 0;
data_fake.pos_delta = delta;
if (is_inf)
data_fake.flags |= SF_IS_INF;
if ( flags & SCF_DO_STCLASS && !scan->flags
&& OP(scan) == IFMATCH ) { /* Lookahead */
ssc_init(pRExC_state, &intrnl);
data_fake.start_class = &intrnl;
f |= SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
next = regnext(scan);
nscan = NEXTOPER(NEXTOPER(scan));
/* positive lookahead study_chunk() recursion */
*minnextp = study_chunk(pRExC_state, &nscan, minnextp,
&deltanext, last, &data_fake,
stopparen, recursed_depth, NULL,
f, depth+1);
if (scan->flags) {
assert(0); /* This code has never been tested since this
is normally not compiled */
if ( deltanext < 0
|| deltanext > (I32) U8_MAX
|| *minnextp > (I32)U8_MAX
|| *minnextp + deltanext > (I32)U8_MAX)
{
FAIL2("Lookbehind longer than %" UVuf " not implemented",
(UV)U8_MAX);
}
if (deltanext) {
scan->next_off = deltanext;
}
scan->flags = (U8)*minnextp + deltanext;
}
*minnextp += min;
if (f & SCF_DO_STCLASS_AND) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl);
ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING;
}
if (data) {
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
if ((flags & SCF_DO_SUBSTR) && data_fake.last_found) {
int i;
if (RExC_rx->minlen<*minnextp)
RExC_rx->minlen=*minnextp;
scan_commit(pRExC_state, &data_fake, minnextp, is_inf);
SvREFCNT_dec_NN(data_fake.last_found);
for (i = 0; i < 2; i++) {
if (data_fake.substrs[i].minlenp != minlenp) {
data->substrs[i].min_offset =
data_fake.substrs[i].min_offset;
data->substrs[i].max_offset =
data_fake.substrs[i].max_offset;
data->substrs[i].minlenp =
data_fake.substrs[i].minlenp;
data->substrs[i].lookbehind += scan->flags;
}
}
}
}
}
#endif
}
else if (OP(scan) == OPEN) {
if (stopparen != (I32)ARG(scan))
pars++;
}
else if (OP(scan) == CLOSE) {
if (stopparen == (I32)ARG(scan)) {
break;
}
if ((I32)ARG(scan) == is_par) {
next = regnext(scan);
if ( next && (OP(next) != WHILEM) && next < last)
is_par = 0; /* Disable optimization */
}
if (data)
*(data->last_closep) = ARG(scan);
}
else if (OP(scan) == EVAL) {
if (data)
data->flags |= SF_HAS_EVAL;
}
else if ( PL_regkind[OP(scan)] == ENDLIKE ) {
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
flags &= ~SCF_DO_SUBSTR;
}
if (data && OP(scan)==ACCEPT) {
data->flags |= SCF_SEEN_ACCEPT;
if (stopmin > min)
stopmin = min;
}
}
else if (OP(scan) == LOGICAL && scan->flags == 2) /* Embedded follows */
{
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_anything(data->start_class);
flags &= ~SCF_DO_STCLASS;
}
else if (OP(scan) == GPOS) {
if (!(RExC_rx->intflags & PREGf_GPOS_FLOAT) &&
!(delta || is_inf || (data && data->pos_delta)))
{
if (!(RExC_rx->intflags & PREGf_ANCH) && (flags & SCF_DO_SUBSTR))
RExC_rx->intflags |= PREGf_ANCH_GPOS;
if (RExC_rx->gofs < (STRLEN)min)
RExC_rx->gofs = min;
} else {
RExC_rx->intflags |= PREGf_GPOS_FLOAT;
RExC_rx->gofs = 0;
}
}
#ifdef TRIE_STUDY_OPT
#ifdef FULL_TRIE_STUDY
else if (PL_regkind[OP(scan)] == TRIE) {
/* NOTE - There is similar code to this block above for handling
BRANCH nodes on the initial study. If you change stuff here
check there too. */
regnode *trie_node= scan;
regnode *tail= regnext(scan);
reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
SSize_t max1 = 0, min1 = SSize_t_MAX;
regnode_ssc accum;
if (flags & SCF_DO_SUBSTR) { /* XXXX Add !SUSPEND? */
/* Cannot merge strings after this. */
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (flags & SCF_DO_STCLASS)
ssc_init_zero(pRExC_state, &accum);
if (!trie->jump) {
min1= trie->minlen;
max1= trie->maxlen;
} else {
const regnode *nextbranch= NULL;
U32 word;
for ( word=1 ; word <= trie->wordcount ; word++)
{
SSize_t deltanext=0, minnext=0, f = 0, fake;
regnode_ssc this_class;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
data_fake.start_class = &this_class;
f = SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
if (trie->jump[word]) {
if (!nextbranch)
nextbranch = trie_node + trie->jump[0];
scan= trie_node + trie->jump[word];
/* We go from the jump point to the branch that follows
it. Note this means we need the vestigial unused
branches even though they aren't otherwise used. */
/* optimise study_chunk() for TRIE */
minnext = study_chunk(pRExC_state, &scan, minlenp,
&deltanext, (regnode *)nextbranch, &data_fake,
stopparen, recursed_depth, NULL, f, depth+1);
}
if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH)
nextbranch= regnext((regnode*)nextbranch);
if (min1 > (SSize_t)(minnext + trie->minlen))
min1 = minnext + trie->minlen;
if (deltanext == SSize_t_MAX) {
is_inf = is_inf_internal = 1;
max1 = SSize_t_MAX;
} else if (max1 < (SSize_t)(minnext + deltanext + trie->maxlen))
max1 = minnext + deltanext + trie->maxlen;
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SCF_SEEN_ACCEPT) {
if ( stopmin > min + min1)
stopmin = min + min1;
flags &= ~SCF_DO_SUBSTR;
if (data)
data->flags |= SCF_SEEN_ACCEPT;
}
if (data) {
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (flags & SCF_DO_STCLASS)
ssc_or(pRExC_state, &accum, (regnode_charclass *) &this_class);
}
}
if (flags & SCF_DO_SUBSTR) {
data->pos_min += min1;
data->pos_delta += max1 - min1;
if (max1 != min1 || is_inf)
data->cur_is_floating = 1; /* float */
}
min += min1;
if (delta != SSize_t_MAX) {
if (SSize_t_MAX - (max1 - min1) >= delta)
delta += max1 - min1;
else
delta = SSize_t_MAX;
}
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &accum);
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (flags & SCF_DO_STCLASS_AND) {
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum);
flags &= ~SCF_DO_STCLASS;
}
else {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&accum, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
}
}
scan= tail;
continue;
}
#else
else if (PL_regkind[OP(scan)] == TRIE) {
reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
U8*bang=NULL;
min += trie->minlen;
delta += (trie->maxlen - trie->minlen);
flags &= ~SCF_DO_STCLASS; /* xxx */
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min += trie->minlen;
data->pos_delta += (trie->maxlen - trie->minlen);
if (trie->maxlen != trie->minlen)
data->cur_is_floating = 1; /* float */
}
if (trie->jump) /* no more substrings -- for now /grr*/
flags &= ~SCF_DO_SUBSTR;
}
#endif /* old or new */
#endif /* TRIE_STUDY_OPT */
/* Else: zero-length, ignore. */
scan = regnext(scan);
}
finish:
if (frame) {
/* we need to unwind recursion. */
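        /* This is the iterative analogue of returning from a recursive
         * call: restore the saved scan/last/stopparen context and resume
         * the suspended loop via fake_study_recurse. */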
depth = depth - 1;
DEBUG_STUDYDATA("frame-end", data, depth, is_inf);
DEBUG_PEEP("fend", scan, depth, flags);
/* restore previous context */
last = frame->last_regnode;
scan = frame->next_regnode;
stopparen = frame->stopparen;
recursed_depth = frame->prev_recursed_depth;
RExC_frame_last = frame->prev_frame;
frame = frame->this_prev_frame;
goto fake_study_recurse;
}
assert(!frame);
DEBUG_STUDYDATA("pre-fin", data, depth, is_inf);
*scanp = scan;
*deltap = is_inf_internal ? SSize_t_MAX : delta;
if (flags & SCF_DO_SUBSTR && is_inf)
data->pos_delta = SSize_t_MAX - data->pos_min;
if (is_par > (I32)U8_MAX)
is_par = 0;
if (is_par && pars==1 && data) {
data->flags |= SF_IN_PAR;
data->flags &= ~SF_HAS_PAR;
}
else if (pars && data) {
data->flags |= SF_HAS_PAR;
data->flags &= ~SF_IN_PAR;
}
if (flags & SCF_DO_STCLASS_OR)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
if (flags & SCF_TRIE_RESTUDY)
data->flags |= SCF_TRIE_RESTUDY;
DEBUG_STUDYDATA("post-fin", data, depth, is_inf);
{
SSize_t final_minlen= min < stopmin ? min : stopmin;
if (!(RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN)) {
if (final_minlen > SSize_t_MAX - delta)
RExC_maxlen = SSize_t_MAX;
else if (RExC_maxlen < final_minlen + delta)
RExC_maxlen = final_minlen + delta;
}
return final_minlen;
}
NOT_REACHED; /* NOTREACHED */
|
124503767038519369207654054837018016597
|
regcomp.c
|
257709146327047081328217382800700188887
|
CWE-120
|
CVE-2020-12723
|
regcomp.c in Perl before 5.30.3 allows a buffer overflow via a crafted regular expression because of recursive S_study_chunk calls.
|
https://nvd.nist.gov/vuln/detail/CVE-2020-12723
|
486,837
|
perl5
|
66bbb51b93253a3f87d11c2695cfb7bdb782184a
|
https://github.com/perl/perl5
|
https://github.com/perl/perl5/commit/66bbb51b93253a3f87d11c2695cfb7bdb782184a
|
study_chunk: avoid mutating regexp program within GOSUB
gh16947 and gh17743: studying GOSUB may restudy in an inner call
(via a mix of recursion and enframing) something that an outer call
is in the middle of looking at. Let the outer frame deal with it.
(CVE-2020-12723)
(cherry picked from commit c4033e740bd18d9fbe3456a9db2ec2053cdc5271)
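A minimal sketch of the guard this patch threads through the study pass,
condensed from the patched S_study_chunk below. The names shown
(was_mutate_ok, frame->in_gosub, mutate_ok, JOIN_EXACT) are the ones the
real function uses; everything else is elided plumbing:

STATIC SSize_t
S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp,
              /* ... other parameters elided ... */
              U32 flags, U32 depth, bool was_mutate_ok)
{
    scan_frame *frame = NULL;
    /* ... */
    while (scan && OP(scan) != END && scan < last) {
        /* Mutating the regexp program is allowed only if the caller
         * permitted it AND we are not anywhere within the recursed or
         * enframed handling of a GOSUB; in that case the outermost
         * study pass revisits these nodes and rewrites them itself. */
        bool mutate_ok = was_mutate_ok && !(frame && frame->in_gosub);

        if (mutate_ok)
            JOIN_EXACT(scan, &min_subtract, &unfolded_multi_char, 0);

        /* ... every other in-place rewrite (trie construction,
         * EXACTFU_S_EDGE => EXACTFU, the ANYOFM transform,
         * CURLYX => CURLYN, etc.) is gated on mutate_ok the same way,
         * and recursive study_chunk() calls pass mutate_ok down as
         * was_mutate_ok while GOSUB frames set in_gosub. */
    }
    /* ... */
}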
| 0
|
STATIC SSize_t
S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp,
SSize_t *minlenp, SSize_t *deltap,
regnode *last,
scan_data_t *data,
I32 stopparen,
U32 recursed_depth,
regnode_ssc *and_withp,
U32 flags, U32 depth, bool was_mutate_ok)
/* scanp: Start here (read-write). */
/* deltap: Write maxlen-minlen here. */
/* last: Stop before this one. */
/* data: string data about the pattern */
/* stopparen: treat close N as END */
/* recursed: which subroutines have we recursed into */
/* and_withp: Valid if flags & SCF_DO_STCLASS_OR */
{
dVAR;
/* There must be at least this number of characters to match */
SSize_t min = 0;
I32 pars = 0, code;
regnode *scan = *scanp, *next;
SSize_t delta = 0;
int is_inf = (flags & SCF_DO_SUBSTR) && (data->flags & SF_IS_INF);
int is_inf_internal = 0; /* The studied chunk is infinite */
I32 is_par = OP(scan) == OPEN ? ARG(scan) : 0;
scan_data_t data_fake;
SV *re_trie_maxbuff = NULL;
regnode *first_non_open = scan;
SSize_t stopmin = SSize_t_MAX;
scan_frame *frame = NULL;
GET_RE_DEBUG_FLAGS_DECL;
PERL_ARGS_ASSERT_STUDY_CHUNK;
RExC_study_started= 1;
Zero(&data_fake, 1, scan_data_t);
if ( depth == 0 ) {
while (first_non_open && OP(first_non_open) == OPEN)
first_non_open=regnext(first_non_open);
}
fake_study_recurse:
DEBUG_r(
RExC_study_chunk_recursed_count++;
);
DEBUG_OPTIMISE_MORE_r(
{
Perl_re_indentf( aTHX_ "study_chunk stopparen=%ld recursed_count=%lu depth=%lu recursed_depth=%lu scan=%p last=%p",
depth, (long)stopparen,
(unsigned long)RExC_study_chunk_recursed_count,
(unsigned long)depth, (unsigned long)recursed_depth,
scan,
last);
if (recursed_depth) {
U32 i;
U32 j;
for ( j = 0 ; j < recursed_depth ; j++ ) {
for ( i = 0 ; i < (U32)RExC_total_parens ; i++ ) {
if (
PAREN_TEST(RExC_study_chunk_recursed +
( j * RExC_study_chunk_recursed_bytes), i )
&& (
!j ||
!PAREN_TEST(RExC_study_chunk_recursed +
(( j - 1 ) * RExC_study_chunk_recursed_bytes), i)
)
) {
Perl_re_printf( aTHX_ " %d",(int)i);
break;
}
}
if ( j + 1 < recursed_depth ) {
Perl_re_printf( aTHX_ ",");
}
}
}
Perl_re_printf( aTHX_ "\n");
}
);
while ( scan && OP(scan) != END && scan < last ){
UV min_subtract = 0; /* How many chars to subtract from the minimum
node length to get a real minimum (because
the folded version may be shorter) */
bool unfolded_multi_char = FALSE;
/* avoid mutating ops if we are anywhere within the recursed or
* enframed handling for a GOSUB: the outermost level will handle it.
*/
bool mutate_ok = was_mutate_ok && !(frame && frame->in_gosub);
/* Peephole optimizer: */
DEBUG_STUDYDATA("Peep", data, depth, is_inf);
DEBUG_PEEP("Peep", scan, depth, flags);
/* The reason we do this here is that we need to deal with things like
* /(?:f)(?:o)(?:o)/ which can't be dealt with by the normal EXACT
* parsing code, as each (?:..) is handled by a different invocation of
* reg() -- Yves
*/
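        /* JOIN_EXACT rewrites the program in place, so it only runs when
         * mutation is permitted (part of the CVE-2020-12723 fix). */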
if (mutate_ok)
JOIN_EXACT(scan,&min_subtract, &unfolded_multi_char, 0);
/* Follow the next-chain of the current node and optimize
away all the NOTHINGs from it.
*/
rck_elide_nothing(scan);
/* The principal pseudo-switch. Cannot be a switch, since we
look into several different things. */
if ( OP(scan) == DEFINEP ) {
SSize_t minlen = 0;
SSize_t deltanext = 0;
SSize_t fake_last_close = 0;
I32 f = SCF_IN_DEFINE;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
scan = regnext(scan);
assert( OP(scan) == IFTHEN );
DEBUG_PEEP("expect IFTHEN", scan, depth, flags);
data_fake.last_closep= &fake_last_close;
minlen = *minlenp;
next = regnext(scan);
scan = NEXTOPER(NEXTOPER(scan));
DEBUG_PEEP("scan", scan, depth, flags);
DEBUG_PEEP("next", next, depth, flags);
/* we suppose the run is continuous, last=next...
* NOTE we don't use the return here! */
/* DEFINEP study_chunk() recursion */
(void)study_chunk(pRExC_state, &scan, &minlen,
&deltanext, next, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1, mutate_ok);
scan = next;
} else
if (
OP(scan) == BRANCH ||
OP(scan) == BRANCHJ ||
OP(scan) == IFTHEN
) {
next = regnext(scan);
code = OP(scan);
/* The op(next)==code check below is to see if we
* have "BRANCH-BRANCH", "BRANCHJ-BRANCHJ", "IFTHEN-IFTHEN"
* IFTHEN is special as it might not appear in pairs.
* Not sure whether BRANCH-BRANCHJ is possible, regardless
* we don't handle it cleanly. */
if (OP(next) == code || code == IFTHEN) {
/* NOTE - There is similar code to this block below for
* handling TRIE nodes on a re-study. If you change stuff here
* check there too. */
SSize_t max1 = 0, min1 = SSize_t_MAX, num = 0;
regnode_ssc accum;
regnode * const startbranch=scan;
if (flags & SCF_DO_SUBSTR) {
/* Cannot merge strings after this. */
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (flags & SCF_DO_STCLASS)
ssc_init_zero(pRExC_state, &accum);
while (OP(scan) == code) {
SSize_t deltanext, minnext, fake;
I32 f = 0;
regnode_ssc this_class;
DEBUG_PEEP("Branch", scan, depth, flags);
num++;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
next = regnext(scan);
scan = NEXTOPER(scan); /* everything */
if (code != BRANCH) /* everything but BRANCH */
scan = NEXTOPER(scan);
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
data_fake.start_class = &this_class;
f = SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
/* we suppose the run is continuous, last=next...*/
/* recurse study_chunk() for each BRANCH in an alternation */
minnext = study_chunk(pRExC_state, &scan, minlenp,
&deltanext, next, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1,
mutate_ok);
if (min1 > minnext)
min1 = minnext;
if (deltanext == SSize_t_MAX) {
is_inf = is_inf_internal = 1;
max1 = SSize_t_MAX;
} else if (max1 < minnext + deltanext)
max1 = minnext + deltanext;
scan = next;
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SCF_SEEN_ACCEPT) {
if ( stopmin > minnext)
stopmin = min + min1;
flags &= ~SCF_DO_SUBSTR;
if (data)
data->flags |= SCF_SEEN_ACCEPT;
}
if (data) {
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (flags & SCF_DO_STCLASS)
ssc_or(pRExC_state, &accum, (regnode_charclass*)&this_class);
}
if (code == IFTHEN && num < 2) /* Empty ELSE branch */
min1 = 0;
if (flags & SCF_DO_SUBSTR) {
data->pos_min += min1;
if (data->pos_delta >= SSize_t_MAX - (max1 - min1))
data->pos_delta = SSize_t_MAX;
else
data->pos_delta += max1 - min1;
if (max1 != min1 || is_inf)
data->cur_is_floating = 1;
}
min += min1;
if (delta == SSize_t_MAX
|| SSize_t_MAX - delta - (max1 - min1) < 0)
delta = SSize_t_MAX;
else
delta += max1 - min1;
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass*) &accum);
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (flags & SCF_DO_STCLASS_AND) {
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum);
flags &= ~SCF_DO_STCLASS;
}
else {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&accum, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
}
}
if (PERL_ENABLE_TRIE_OPTIMISATION
&& OP(startbranch) == BRANCH
&& mutate_ok
) {
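                /* Trie construction below rewrites branch sequences in
                 * place, hence the mutate_ok test in the condition above. */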
/* demq.
Assuming this was/is a branch we are dealing with: 'scan'
now points at the item that follows the branch sequence,
whatever it is. We now start at the beginning of the
sequence and look for subsequences of
BRANCH->EXACT=>x1
BRANCH->EXACT=>x2
tail
which would be constructed from a pattern like
/A|LIST|OF|WORDS/
If we can find such a subsequence we need to turn the first
element into a trie and then add the subsequent branch exact
strings to the trie.
We have two cases
1. patterns where the whole set of branches can be
converted.
2. patterns where only a subset can be converted.
In case 1 we can replace the whole set with a single regop
for the trie. In case 2 we need to keep the start and end
branches so
'BRANCH EXACT; BRANCH EXACT; BRANCH X'
becomes BRANCH TRIE; BRANCH X;
There is an additional case, that being where there is a
common prefix, which gets split out into an EXACT-like node
preceding the TRIE node.
If x(1..n)==tail then we can do a simple trie, if not we make
a "jump" trie, such that when we match the appropriate word
we "jump" to the appropriate tail node. Essentially we turn
a nested if into a case structure of sorts.
*/
int made=0;
if (!re_trie_maxbuff) {
re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, 1);
if (!SvIOK(re_trie_maxbuff))
sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT);
}
if ( SvIV(re_trie_maxbuff)>=0 ) {
regnode *cur;
regnode *first = (regnode *)NULL;
regnode *last = (regnode *)NULL;
regnode *tail = scan;
U8 trietype = 0;
U32 count=0;
/* var tail is used because there may be a TAIL
regop in the way. I.e., the exacts will point to the
thing following the TAIL, but the last branch will
point at the TAIL. So we advance tail. If we
have nested (?:) we may have to move through several
tails.
*/
while ( OP( tail ) == TAIL ) {
/* this is the TAIL generated by (?:) */
tail = regnext( tail );
}
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, tail, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "%s %" UVuf ":%s\n",
depth+1,
"Looking for TRIE'able sequences. Tail node is ",
(UV) REGNODE_OFFSET(tail),
SvPV_nolen_const( RExC_mysv )
);
});
/*
Step through the branches
cur represents each branch,
noper is the first thing to be matched as part
of that branch
noper_next is the regnext() of that node.
We normally handle a case like this
/FOO[xyz]|BAR[pqr]/ via a "jump trie" but we also
support building with NOJUMPTRIE, which restricts
the trie logic to structures like /FOO|BAR/.
If noper is a trieable nodetype then the branch is
a possible optimization target. If we are building
under NOJUMPTRIE then we require that noper_next is
the same as scan (our current position in the regex
program).
Once we have two or more consecutive such branches
we can create a trie of the EXACT's contents and
stitch it in place into the program.
If the sequence represents all of the branches in
the alternation we replace the entire thing with a
single TRIE node.
Otherwise when it is a subsequence we need to
stitch it in place and replace only the relevant
branches. This means the first branch has to remain,
as it is used by the alternation logic; its
next pointer needs to be repointed at the item
on the branch chain following the last branch we
have optimized away.
This could be either a BRANCH, in which case the
subsequence is internal, or it could be the item
following the branch sequence in which case the
subsequence is at the end (which does not
necessarily mean the first node is the start of the
alternation).
TRIE_TYPE(X) is a define which maps the optype to a
trietype.
optype | trietype
----------------+-----------
NOTHING | NOTHING
EXACT | EXACT
EXACT_ONLY8 | EXACT
EXACTFU | EXACTFU
EXACTFU_ONLY8 | EXACTFU
EXACTFUP | EXACTFU
EXACTFAA | EXACTFAA
EXACTL | EXACTL
EXACTFLU8 | EXACTFLU8
*/
#define TRIE_TYPE(X) ( ( NOTHING == (X) ) \
? NOTHING \
: ( EXACT == (X) || EXACT_ONLY8 == (X) ) \
? EXACT \
: ( EXACTFU == (X) \
|| EXACTFU_ONLY8 == (X) \
|| EXACTFUP == (X) ) \
? EXACTFU \
: ( EXACTFAA == (X) ) \
? EXACTFAA \
: ( EXACTL == (X) ) \
? EXACTL \
: ( EXACTFLU8 == (X) ) \
? EXACTFLU8 \
: 0 )
/* don't use tail as the end marker for this traverse */
for ( cur = startbranch ; cur != scan ; cur = regnext( cur ) ) {
regnode * const noper = NEXTOPER( cur );
U8 noper_type = OP( noper );
U8 noper_trietype = TRIE_TYPE( noper_type );
#if defined(DEBUGGING) || defined(NOJUMPTRIE)
regnode * const noper_next = regnext( noper );
U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0;
U8 noper_next_trietype = (noper_next && noper_next < tail) ? TRIE_TYPE( noper_next_type ) :0;
#endif
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %d:%s (%d)",
depth+1,
REG_NODE_NUM(cur), SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur) );
regprop(RExC_rx, RExC_mysv, noper, NULL, pRExC_state);
Perl_re_printf( aTHX_ " -> %d:%s",
REG_NODE_NUM(noper), SvPV_nolen_const(RExC_mysv));
if ( noper_next ) {
regprop(RExC_rx, RExC_mysv, noper_next, NULL, pRExC_state);
Perl_re_printf( aTHX_ "\t=> %d:%s\t",
REG_NODE_NUM(noper_next), SvPV_nolen_const(RExC_mysv));
}
Perl_re_printf( aTHX_ "(First==%d,Last==%d,Cur==%d,tt==%s,ntt==%s,nntt==%s)\n",
REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur),
PL_reg_name[trietype], PL_reg_name[noper_trietype], PL_reg_name[noper_next_trietype]
);
});
/* Is noper a trieable nodetype that can be merged
* with the current trie (if there is one)? */
if ( noper_trietype
&&
(
( noper_trietype == NOTHING )
|| ( trietype == NOTHING )
|| ( trietype == noper_trietype )
)
#ifdef NOJUMPTRIE
&& noper_next >= tail
#endif
&& count < U16_MAX)
{
/* Handle a mergeable trieable node. Either we are
* the first node in a new trieable sequence,
* in which case we do some bookkeeping,
* otherwise we update the end pointer. */
if ( !first ) {
first = cur;
if ( noper_trietype == NOTHING ) {
#if !defined(DEBUGGING) && !defined(NOJUMPTRIE)
regnode * const noper_next = regnext( noper );
U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0;
U8 noper_next_trietype = noper_next_type ? TRIE_TYPE( noper_next_type ) :0;
#endif
if ( noper_next_trietype ) {
trietype = noper_next_trietype;
} else if (noper_next_type) {
/* a NOTHING regop is 1 regop wide.
* We need at least two for a trie
* so we can't merge this in */
first = NULL;
}
} else {
trietype = noper_trietype;
}
} else {
if ( trietype == NOTHING )
trietype = noper_trietype;
last = cur;
}
if (first)
count++;
} /* end handle mergeable trieable node */
else {
/* handle an unmergeable node -
* noper may either be a trieable node which
* cannot be merged with the current trie,
* or a non-trieable node */
if ( last ) {
/* If last is set and trietype is not
* NOTHING then we have found at least two
* trieable branch sequences in a row of a
* similar trietype so we can turn them
* into a trie. If/when we allow NOTHING to
* start a trie sequence this condition
* will be required, and it isn't expensive
* so we leave it in for now. */
if ( trietype && trietype != NOTHING )
make_trie( pRExC_state,
startbranch, first, cur, tail,
count, trietype, depth+1 );
last = NULL; /* note: we clear/update
first, trietype etc. below,
so we don't do it here */
}
if ( noper_trietype
#ifdef NOJUMPTRIE
&& noper_next >= tail
#endif
){
/* noper is trieable, so we can start a new
* trie sequence */
count = 1;
first = cur;
trietype = noper_trietype;
} else if (first) {
/* if we already saw a first but the
* current node is not trieable then we have
* to reset the first information. */
count = 0;
first = NULL;
trietype = 0;
}
} /* end handle unmergeable node */
} /* loop over branches */
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %s (%d) <SCAN FINISHED> ",
depth+1, SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur));
Perl_re_printf( aTHX_ "(First==%d, Last==%d, Cur==%d, tt==%s)\n",
REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur),
PL_reg_name[trietype]
);
});
if ( last && trietype ) {
if ( trietype != NOTHING ) {
/* the last branch of the sequence was part of
* a trie, so we have to construct it here
* outside of the loop */
made= make_trie( pRExC_state, startbranch,
first, scan, tail, count,
trietype, depth+1 );
#ifdef TRIE_STUDY_OPT
if ( ((made == MADE_EXACT_TRIE &&
startbranch == first)
|| ( first_non_open == first )) &&
depth==0 ) {
flags |= SCF_TRIE_RESTUDY;
if ( startbranch == first
&& scan >= tail )
{
RExC_seen &=~REG_TOP_LEVEL_BRANCHES_SEEN;
}
}
#endif
} else {
/* at this point we know whatever we have is a
* NOTHING sequence/branch AND if 'startbranch'
* is 'first' then we can turn the whole thing
* into a NOTHING
*/
if ( startbranch == first ) {
regnode *opt;
/* the entire thing is a NOTHING sequence,
* something like this: (?:|). So we can
* turn it into a plain NOTHING op. */
DEBUG_TRIE_COMPILE_r({
regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state);
Perl_re_indentf( aTHX_ "- %s (%d) <NOTHING BRANCH SEQUENCE>\n",
depth+1,
SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur));
});
OP(startbranch)= NOTHING;
NEXT_OFF(startbranch)= tail - startbranch;
for ( opt= startbranch + 1; opt < tail ; opt++ )
OP(opt)= OPTIMIZED;
}
}
} /* end if ( last) */
} /* TRIE_MAXBUF is non-zero */
} /* do trie */
}
else if ( code == BRANCHJ ) { /* single branch is optimized. */
scan = NEXTOPER(NEXTOPER(scan));
} else /* single branch is optimized. */
scan = NEXTOPER(scan);
continue;
} else if (OP(scan) == SUSPEND || OP(scan) == GOSUB) {
I32 paren = 0;
regnode *start = NULL;
regnode *end = NULL;
U32 my_recursed_depth= recursed_depth;
if (OP(scan) != SUSPEND) { /* GOSUB */
/* Do setup, note this code has side effects beyond
* the rest of this block. Specifically setting
* RExC_recurse[] must happen at least once during
* study_chunk(). */
paren = ARG(scan);
RExC_recurse[ARG2L(scan)] = scan;
start = REGNODE_p(RExC_open_parens[paren]);
end = REGNODE_p(RExC_close_parens[paren]);
/* NOTE we MUST always execute the above code, even
* if we do nothing with a GOSUB */
if (
( flags & SCF_IN_DEFINE )
||
(
(is_inf_internal || is_inf || (data && data->flags & SF_IS_INF))
&&
( (flags & (SCF_DO_STCLASS | SCF_DO_SUBSTR)) == 0 )
)
) {
/* no need to do anything here if we are in a define. */
/* or we are after some kind of infinite construct
* so we can skip recursing into this item.
* Since it is infinite we will not change the maxlen
* or delta, and if we miss something that might raise
* the minlen it will merely pessimise a little.
*
* In other words, /(?(DEFINE)(?<foo>foo|food))a+(?&foo)/
* might result in a minlen of 1 rather than 4,
* but this doesn't make us mismatch; it just makes us
* try a bit harder than we should.
* */
scan= regnext(scan);
continue;
}
if (
!recursed_depth
||
!PAREN_TEST(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes), paren)
) {
/* it is quite possible that there are more efficient ways
* to do this. We maintain a bitmap per level of recursion
* of which patterns we have entered so we can detect if a
* pattern creates a possible infinite loop. When we
* recurse down a level we copy the previous levels bitmap
* down. When we are at recursion level 0 we zero the top
* level bitmap. It would be nice to implement a different
* more efficient way of doing this. In particular the top
* level bitmap may be unnecessary.
*/
if (!recursed_depth) {
Zero(RExC_study_chunk_recursed, RExC_study_chunk_recursed_bytes, U8);
} else {
Copy(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes),
RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes),
RExC_study_chunk_recursed_bytes, U8);
}
/* we haven't recursed into this paren yet, so recurse into it */
DEBUG_STUDYDATA("gosub-set", data, depth, is_inf);
PAREN_SET(RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes), paren);
my_recursed_depth= recursed_depth + 1;
} else {
DEBUG_STUDYDATA("gosub-inf", data, depth, is_inf);
/* some form of infinite recursion, assume infinite length
* */
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1;
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_anything(data->start_class);
flags &= ~SCF_DO_STCLASS;
start= NULL; /* reset start so we don't recurse later on. */
}
} else {
paren = stopparen;
start = scan + 2;
end = regnext(scan);
}
if (start) {
scan_frame *newframe;
assert(end);
if (!RExC_frame_last) {
Newxz(newframe, 1, scan_frame);
SAVEDESTRUCTOR_X(S_unwind_scan_frames, newframe);
RExC_frame_head= newframe;
RExC_frame_count++;
} else if (!RExC_frame_last->next_frame) {
Newxz(newframe, 1, scan_frame);
RExC_frame_last->next_frame= newframe;
newframe->prev_frame= RExC_frame_last;
RExC_frame_count++;
} else {
newframe= RExC_frame_last->next_frame;
}
RExC_frame_last= newframe;
newframe->next_regnode = regnext(scan);
newframe->last_regnode = last;
newframe->stopparen = stopparen;
newframe->prev_recursed_depth = recursed_depth;
newframe->this_prev_frame= frame;
newframe->in_gosub = (
(frame && frame->in_gosub) || OP(scan) == GOSUB
);
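            /* in_gosub is inherited down the frame chain, so every frame
             * studied beneath a GOSUB computes mutate_ok as false and the
             * program is only mutated by the outermost study pass. */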
DEBUG_STUDYDATA("frame-new", data, depth, is_inf);
DEBUG_PEEP("fnew", scan, depth, flags);
frame = newframe;
scan = start;
stopparen = paren;
last = end;
depth = depth + 1;
recursed_depth= my_recursed_depth;
continue;
}
}
else if ( OP(scan) == EXACT
|| OP(scan) == EXACT_ONLY8
|| OP(scan) == EXACTL)
{
SSize_t l = STR_LEN(scan);
UV uc;
assert(l);
if (UTF) {
const U8 * const s = (U8*)STRING(scan);
uc = utf8_to_uvchr_buf(s, s + l, NULL);
l = utf8_length(s, s + l);
} else {
uc = *((U8*)STRING(scan));
}
min += l;
if (flags & SCF_DO_SUBSTR) { /* Update longest substr. */
/* The code below prefers earlier match for fixed
offset, later match for variable offset. */
if (data->last_end == -1) { /* Update the start info. */
data->last_start_min = data->pos_min;
data->last_start_max = is_inf
? SSize_t_MAX : data->pos_min + data->pos_delta;
}
sv_catpvn(data->last_found, STRING(scan), STR_LEN(scan));
if (UTF)
SvUTF8_on(data->last_found);
{
SV * const sv = data->last_found;
MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg && mg->mg_len >= 0)
mg->mg_len += utf8_length((U8*)STRING(scan),
(U8*)STRING(scan)+STR_LEN(scan));
}
data->last_end = data->pos_min + l;
data->pos_min += l; /* As in the first entry. */
data->flags &= ~SF_BEFORE_EOL;
}
/* ANDing the code point leaves at most it, and not in locale, and
* can't match null string */
if (flags & SCF_DO_STCLASS_AND) {
ssc_cp_and(data->start_class, uc);
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
ssc_clear_locale(data->start_class);
}
else if (flags & SCF_DO_STCLASS_OR) {
ssc_add_cp(data->start_class, uc);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
}
else if (PL_regkind[OP(scan)] == EXACT) {
/* But OP != EXACT!, so is EXACTFish */
SSize_t l = STR_LEN(scan);
const U8 * s = (U8*)STRING(scan);
/* Search for fixed substrings supports EXACT only. */
if (flags & SCF_DO_SUBSTR) {
assert(data);
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (UTF) {
l = utf8_length(s, s + l);
}
if (unfolded_multi_char) {
RExC_seen |= REG_UNFOLDED_MULTI_SEEN;
}
min += l - min_subtract;
assert (min >= 0);
delta += min_subtract;
if (flags & SCF_DO_SUBSTR) {
data->pos_min += l - min_subtract;
if (data->pos_min < 0) {
data->pos_min = 0;
}
data->pos_delta += min_subtract;
if (min_subtract) {
data->cur_is_floating = 1; /* float */
}
}
if (flags & SCF_DO_STCLASS) {
SV* EXACTF_invlist = _make_exactf_invlist(pRExC_state, scan);
assert(EXACTF_invlist);
if (flags & SCF_DO_STCLASS_AND) {
if (OP(scan) != EXACTFL)
ssc_clear_locale(data->start_class);
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
ANYOF_POSIXL_ZERO(data->start_class);
ssc_intersection(data->start_class, EXACTF_invlist, FALSE);
}
else { /* SCF_DO_STCLASS_OR */
ssc_union(data->start_class, EXACTF_invlist, FALSE);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
SvREFCNT_dec(EXACTF_invlist);
}
}
else if (REGNODE_VARIES(OP(scan))) {
SSize_t mincount, maxcount, minnext, deltanext, pos_before = 0;
I32 fl = 0, f = flags;
regnode * const oscan = scan;
regnode_ssc this_class;
regnode_ssc *oclass = NULL;
I32 next_is_eval = 0;
switch (PL_regkind[OP(scan)]) {
case WHILEM: /* End of (?:...)* . */
scan = NEXTOPER(scan);
goto finish;
case PLUS:
if (flags & (SCF_DO_SUBSTR | SCF_DO_STCLASS)) {
next = NEXTOPER(scan);
if ( OP(next) == EXACT
|| OP(next) == EXACT_ONLY8
|| OP(next) == EXACTL
|| (flags & SCF_DO_STCLASS))
{
mincount = 1;
maxcount = REG_INFTY;
next = regnext(scan);
scan = NEXTOPER(scan);
goto do_curly;
}
}
if (flags & SCF_DO_SUBSTR)
data->pos_min++;
min++;
/* FALLTHROUGH */
case STAR:
next = NEXTOPER(scan);
/* This temporary node can now be turned into EXACTFU, and
* must, as regexec.c doesn't handle it */
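                /* (When mutate_ok is false the rewrite is deferred to the
                 * outermost study pass, which will revisit this node.) */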
if (OP(next) == EXACTFU_S_EDGE && mutate_ok) {
OP(next) = EXACTFU;
}
if ( STR_LEN(next) == 1
&& isALPHA_A(* STRING(next))
&& ( OP(next) == EXACTFAA
|| ( OP(next) == EXACTFU
&& ! HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(* STRING(next))))
&& mutate_ok
) {
/* These differ in just one bit */
U8 mask = ~ ('A' ^ 'a');
assert(isALPHA_A(* STRING(next)));
/* Then replace it by an ANYOFM node, with
* the mask set to the complement of the
* bit that differs between upper and lower
* case, and the lowest code point of the
* pair (which the '&' forces) */
OP(next) = ANYOFM;
ARG_SET(next, *STRING(next) & mask);
FLAGS(next) = mask;
}
if (flags & SCF_DO_STCLASS) {
mincount = 0;
maxcount = REG_INFTY;
next = regnext(scan);
scan = NEXTOPER(scan);
goto do_curly;
}
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
/* Cannot extend fixed substrings */
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
scan = regnext(scan);
goto optimize_curly_tail;
case CURLY:
if (stopparen>0 && (OP(scan)==CURLYN || OP(scan)==CURLYM)
&& (scan->flags == stopparen))
{
mincount = 1;
maxcount = 1;
} else {
mincount = ARG1(scan);
maxcount = ARG2(scan);
}
next = regnext(scan);
if (OP(scan) == CURLYX) {
I32 lp = (data ? *(data->last_closep) : 0);
scan->flags = ((lp <= (I32)U8_MAX) ? (U8)lp : U8_MAX);
}
scan = NEXTOPER(scan) + EXTRA_STEP_2ARGS;
next_is_eval = (OP(scan) == EVAL);
do_curly:
if (flags & SCF_DO_SUBSTR) {
if (mincount == 0)
scan_commit(pRExC_state, data, minlenp, is_inf);
/* Cannot extend fixed substrings */
pos_before = data->pos_min;
}
if (data) {
fl = data->flags;
data->flags &= ~(SF_HAS_PAR|SF_IN_PAR|SF_HAS_EVAL);
if (is_inf)
data->flags |= SF_IS_INF;
}
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
oclass = data->start_class;
data->start_class = &this_class;
f |= SCF_DO_STCLASS_AND;
f &= ~SCF_DO_STCLASS_OR;
}
/* Exclude from super-linear cache processing any {n,m}
regops for which the combination of input pos and regex
pos is not enough information to determine if a match
will be possible.
For example, in the regex /foo(bar\s*){4,8}baz/ with the
regex pos at the \s*, the prospects for a match depend not
only on the input position but also on how many (bar\s*)
repeats into the {4,8} we are. */
if ((mincount > 1) || (maxcount > 1 && maxcount != REG_INFTY))
f &= ~SCF_WHILEM_VISITED_POS;
/* This will finish on WHILEM, setting scan, or on NULL: */
/* recurse study_chunk() on loop bodies */
minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext,
last, data, stopparen, recursed_depth, NULL,
(mincount == 0
? (f & ~SCF_DO_SUBSTR)
: f)
, depth+1, mutate_ok);
if (flags & SCF_DO_STCLASS)
data->start_class = oclass;
if (mincount == 0 || minnext == 0) {
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
}
else if (flags & SCF_DO_STCLASS_AND) {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&this_class, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
ANYOF_FLAGS(data->start_class)
|= SSC_MATCHES_EMPTY_STRING;
}
} else { /* Non-zero len */
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
}
else if (flags & SCF_DO_STCLASS_AND)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &this_class);
flags &= ~SCF_DO_STCLASS;
}
if (!scan) /* It was not CURLYX, but CURLY. */
scan = next;
if (((flags & (SCF_TRIE_DOING_RESTUDY|SCF_DO_SUBSTR))==SCF_DO_SUBSTR)
/* ? quantifier ok, except for (?{ ... }) */
&& (next_is_eval || !(mincount == 0 && maxcount == 1))
&& (minnext == 0) && (deltanext == 0)
&& data && !(data->flags & (SF_HAS_PAR|SF_IN_PAR))
&& maxcount <= REG_INFTY/3) /* Complement check for big
count */
{
_WARN_HELPER(RExC_precomp_end, packWARN(WARN_REGEXP),
Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP),
"Quantifier unexpected on zero-length expression "
"in regex m/%" UTF8f "/",
UTF8fARG(UTF, RExC_precomp_end - RExC_precomp,
RExC_precomp)));
}
if ( ( minnext > 0 && mincount >= SSize_t_MAX / minnext )
|| min >= SSize_t_MAX - minnext * mincount )
{
FAIL("Regexp out of space");
}
min += minnext * mincount;
is_inf_internal |= deltanext == SSize_t_MAX
|| (maxcount == REG_INFTY && minnext + deltanext > 0);
is_inf |= is_inf_internal;
if (is_inf) {
delta = SSize_t_MAX;
} else {
delta += (minnext + deltanext) * maxcount
- minnext * mincount;
}
/* Try powerful optimization CURLYX => CURLYN. */
if ( OP(oscan) == CURLYX && data
&& data->flags & SF_IN_PAR
&& !(data->flags & SF_HAS_EVAL)
&& !deltanext && minnext == 1
&& mutate_ok
) {
/* Try to optimize to CURLYN. */
regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS;
regnode * const nxt1 = nxt;
#ifdef DEBUGGING
regnode *nxt2;
#endif
/* Skip open. */
nxt = regnext(nxt);
if (!REGNODE_SIMPLE(OP(nxt))
&& !(PL_regkind[OP(nxt)] == EXACT
&& STR_LEN(nxt) == 1))
goto nogo;
#ifdef DEBUGGING
nxt2 = nxt;
#endif
nxt = regnext(nxt);
if (OP(nxt) != CLOSE)
goto nogo;
if (RExC_open_parens) {
/*open->CURLYM*/
RExC_open_parens[ARG(nxt1)] = REGNODE_OFFSET(oscan);
/*close->while*/
RExC_close_parens[ARG(nxt1)] = REGNODE_OFFSET(nxt) + 2;
}
/* Now we know that nxt2 is the only contents: */
oscan->flags = (U8)ARG(nxt);
OP(oscan) = CURLYN;
OP(nxt1) = NOTHING; /* was OPEN. */
#ifdef DEBUGGING
OP(nxt1 + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt1+ 1) = 0; /* just for consistency. */
NEXT_OFF(nxt2) = 0; /* just for consistency with CURLY. */
OP(nxt) = OPTIMIZED; /* was CLOSE. */
OP(nxt + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt+ 1) = 0; /* just for consistency. */
#endif
}
nogo:
/* Try optimization CURLYX => CURLYM. */
if ( OP(oscan) == CURLYX && data
&& !(data->flags & SF_HAS_PAR)
&& !(data->flags & SF_HAS_EVAL)
&& !deltanext /* atom is fixed width */
&& minnext != 0 /* CURLYM can't handle zero width */
/* Nor characters whose fold at run-time may be
* multi-character */
&& ! (RExC_seen & REG_UNFOLDED_MULTI_SEEN)
&& mutate_ok
) {
/* XXXX How to optimize if data == 0? */
/* Optimize to a simpler form. */
regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN */
regnode *nxt2;
OP(oscan) = CURLYM;
while ( (nxt2 = regnext(nxt)) /* skip over embedded stuff*/
&& (OP(nxt2) != WHILEM))
nxt = nxt2;
OP(nxt2) = SUCCEED; /* Was WHILEM */
/* Need to optimize away the parentheses. */
if ((data->flags & SF_IN_PAR) && OP(nxt) == CLOSE) {
/* Set the parenthesis number. */
regnode *nxt1 = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN*/
oscan->flags = (U8)ARG(nxt);
if (RExC_open_parens) {
/*open->CURLYM*/
RExC_open_parens[ARG(nxt1)] = REGNODE_OFFSET(oscan);
/*close->NOTHING*/
RExC_close_parens[ARG(nxt1)] = REGNODE_OFFSET(nxt2)
+ 1;
}
OP(nxt1) = OPTIMIZED; /* was OPEN. */
OP(nxt) = OPTIMIZED; /* was CLOSE. */
#ifdef DEBUGGING
OP(nxt1 + 1) = OPTIMIZED; /* was count. */
OP(nxt + 1) = OPTIMIZED; /* was count. */
NEXT_OFF(nxt1 + 1) = 0; /* just for consistency. */
NEXT_OFF(nxt + 1) = 0; /* just for consistency. */
#endif
#if 0
while ( nxt1 && (OP(nxt1) != WHILEM)) {
regnode *nnxt = regnext(nxt1);
if (nnxt == nxt) {
if (reg_off_by_arg[OP(nxt1)])
ARG_SET(nxt1, nxt2 - nxt1);
else if (nxt2 - nxt1 < U16_MAX)
NEXT_OFF(nxt1) = nxt2 - nxt1;
else
OP(nxt) = NOTHING; /* Cannot beautify */
}
nxt1 = nnxt;
}
#endif
/* Optimize again: */
/* recurse study_chunk() on optimised CURLYX => CURLYM */
study_chunk(pRExC_state, &nxt1, minlenp, &deltanext, nxt,
NULL, stopparen, recursed_depth, NULL, 0,
depth+1, mutate_ok);
}
else
oscan->flags = 0;
}
else if ((OP(oscan) == CURLYX)
&& (flags & SCF_WHILEM_VISITED_POS)
/* See the comment on a similar expression above.
However, this time it's not a subexpression
we care about, but the expression itself. */
&& (maxcount == REG_INFTY)
&& data) {
/* This stays as CURLYX, we can put the count/of pair. */
/* Find WHILEM (as in regexec.c) */
regnode *nxt = oscan + NEXT_OFF(oscan);
if (OP(PREVOPER(nxt)) == NOTHING) /* LONGJMP */
nxt += ARG(nxt);
nxt = PREVOPER(nxt);
if (nxt->flags & 0xf) {
/* we've already set whilem count on this node */
} else if (++data->whilem_c < 16) {
assert(data->whilem_c <= RExC_whilem_seen);
nxt->flags = (U8)(data->whilem_c
| (RExC_whilem_seen << 4)); /* On WHILEM */
}
}
if (data && fl & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (flags & SCF_DO_SUBSTR) {
SV *last_str = NULL;
STRLEN last_chrs = 0;
int counted = mincount != 0;
if (data->last_end > 0 && mincount != 0) { /* Ends with a
string. */
SSize_t b = pos_before >= data->last_start_min
? pos_before : data->last_start_min;
STRLEN l;
const char * const s = SvPV_const(data->last_found, l);
SSize_t old = b - data->last_start_min;
assert(old >= 0);
if (UTF)
old = utf8_hop_forward((U8*)s, old,
(U8 *) SvEND(data->last_found))
- (U8*)s;
l -= old;
/* Get the added string: */
last_str = newSVpvn_utf8(s + old, l, UTF);
last_chrs = UTF ? utf8_length((U8*)(s + old),
(U8*)(s + old + l)) : l;
if (deltanext == 0 && pos_before == b) {
/* What was added is a constant string */
if (mincount > 1) {
SvGROW(last_str, (mincount * l) + 1);
repeatcpy(SvPVX(last_str) + l,
SvPVX_const(last_str), l,
mincount - 1);
SvCUR_set(last_str, SvCUR(last_str) * mincount);
/* Add additional parts. */
SvCUR_set(data->last_found,
SvCUR(data->last_found) - l);
sv_catsv(data->last_found, last_str);
{
SV * sv = data->last_found;
MAGIC *mg =
SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg && mg->mg_len >= 0)
mg->mg_len += last_chrs * (mincount-1);
}
last_chrs *= mincount;
data->last_end += l * (mincount - 1);
}
} else {
/* start offset must point into the last copy */
data->last_start_min += minnext * (mincount - 1);
data->last_start_max =
is_inf
? SSize_t_MAX
: data->last_start_max +
(maxcount - 1) * (minnext + data->pos_delta);
}
}
/* It is counted once already... */
data->pos_min += minnext * (mincount - counted);
#if 0
Perl_re_printf( aTHX_ "counted=%" UVuf " deltanext=%" UVuf
" SSize_t_MAX=%" UVuf " minnext=%" UVuf
" maxcount=%" UVuf " mincount=%" UVuf "\n",
(UV)counted, (UV)deltanext, (UV)SSize_t_MAX, (UV)minnext, (UV)maxcount,
(UV)mincount);
if (deltanext != SSize_t_MAX)
Perl_re_printf( aTHX_ "LHS=%" UVuf " RHS=%" UVuf "\n",
(UV)(-counted * deltanext + (minnext + deltanext) * maxcount
- minnext * mincount), (UV)(SSize_t_MAX - data->pos_delta));
#endif
if (deltanext == SSize_t_MAX
|| -counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount >= SSize_t_MAX - data->pos_delta)
data->pos_delta = SSize_t_MAX;
else
data->pos_delta += - counted * deltanext +
(minnext + deltanext) * maxcount - minnext * mincount;
if (mincount != maxcount) {
/* Cannot extend fixed substrings found inside
the group. */
scan_commit(pRExC_state, data, minlenp, is_inf);
if (mincount && last_str) {
SV * const sv = data->last_found;
MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ?
mg_find(sv, PERL_MAGIC_utf8) : NULL;
if (mg)
mg->mg_len = -1;
sv_setsv(sv, last_str);
data->last_end = data->pos_min;
data->last_start_min = data->pos_min - last_chrs;
data->last_start_max = is_inf
? SSize_t_MAX
: data->pos_min + data->pos_delta - last_chrs;
}
data->cur_is_floating = 1; /* float */
}
SvREFCNT_dec(last_str);
}
if (data && (fl & SF_HAS_EVAL))
data->flags |= SF_HAS_EVAL;
optimize_curly_tail:
rck_elide_nothing(oscan);
continue;
default:
#ifdef DEBUGGING
Perl_croak(aTHX_ "panic: unexpected varying REx opcode %d",
OP(scan));
#endif
case REF:
case CLUMP:
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) {
if (OP(scan) == CLUMP) {
/* Actually is any start char, but very few code points
* aren't start characters */
ssc_match_all_cp(data->start_class);
}
else {
ssc_anything(data->start_class);
}
}
flags &= ~SCF_DO_STCLASS;
break;
}
}
else if (OP(scan) == LNBREAK) {
if (flags & SCF_DO_STCLASS) {
if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class,
PL_XPosix_ptrs[_CC_VERTSPACE], FALSE);
ssc_clear_locale(data->start_class);
ANYOF_FLAGS(data->start_class)
&= ~SSC_MATCHES_EMPTY_STRING;
}
else if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class,
PL_XPosix_ptrs[_CC_VERTSPACE],
FALSE);
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
/* See commit msg for
* 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class)
&= ~SSC_MATCHES_EMPTY_STRING;
}
flags &= ~SCF_DO_STCLASS;
}
min++;
if (delta != SSize_t_MAX)
delta++; /* Because of the 2 char string cr-lf */
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min += 1;
if (data->pos_delta != SSize_t_MAX) {
data->pos_delta += 1;
}
data->cur_is_floating = 1; /* float */
}
}
else if (REGNODE_SIMPLE(OP(scan))) {
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min++;
}
min++;
if (flags & SCF_DO_STCLASS) {
bool invert = 0;
SV* my_invlist = NULL;
U8 namedclass;
/* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */
ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING;
/* Some of the logic below assumes that switching
locale on will only add false positives. */
switch (OP(scan)) {
default:
#ifdef DEBUGGING
Perl_croak(aTHX_ "panic: unexpected simple REx opcode %d",
OP(scan));
#endif
case SANY:
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_match_all_cp(data->start_class);
break;
case REG_ANY:
{
SV* REG_ANY_invlist = _new_invlist(2);
REG_ANY_invlist = add_cp_to_invlist(REG_ANY_invlist,
'\n');
if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class,
REG_ANY_invlist,
TRUE /* TRUE => invert, hence all but \n
*/
);
}
else if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class,
REG_ANY_invlist,
TRUE /* TRUE => invert */
);
ssc_clear_locale(data->start_class);
}
SvREFCNT_dec_NN(REG_ANY_invlist);
}
break;
case ANYOFD:
case ANYOFL:
case ANYOFPOSIXL:
case ANYOFH:
case ANYOF:
if (flags & SCF_DO_STCLASS_AND)
ssc_and(pRExC_state, data->start_class,
(regnode_charclass *) scan);
else
ssc_or(pRExC_state, data->start_class,
(regnode_charclass *) scan);
break;
case NANYOFM:
case ANYOFM:
{
SV* cp_list = get_ANYOFM_contents(scan);
if (flags & SCF_DO_STCLASS_OR) {
ssc_union(data->start_class, cp_list, invert);
}
else if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class, cp_list, invert);
}
SvREFCNT_dec_NN(cp_list);
break;
}
case NPOSIXL:
invert = 1;
/* FALLTHROUGH */
case POSIXL:
namedclass = classnum_to_namedclass(FLAGS(scan)) + invert;
if (flags & SCF_DO_STCLASS_AND) {
bool was_there = cBOOL(
ANYOF_POSIXL_TEST(data->start_class,
namedclass));
ANYOF_POSIXL_ZERO(data->start_class);
if (was_there) { /* Do an AND */
ANYOF_POSIXL_SET(data->start_class, namedclass);
}
/* No individual code points can now match */
data->start_class->invlist
= sv_2mortal(_new_invlist(0));
}
else {
int complement = namedclass + ((invert) ? -1 : 1);
assert(flags & SCF_DO_STCLASS_OR);
/* If the complement of this class was already there,
* the result is that they match all code points,
* (\d + \D == everything). Remove the classes from
* future consideration. Locale is not relevant in
* this case */
if (ANYOF_POSIXL_TEST(data->start_class, complement)) {
ssc_match_all_cp(data->start_class);
ANYOF_POSIXL_CLEAR(data->start_class, namedclass);
ANYOF_POSIXL_CLEAR(data->start_class, complement);
}
else { /* The usual case; just add this class to the
existing set */
ANYOF_POSIXL_SET(data->start_class, namedclass);
}
}
break;
case NPOSIXA: /* For these, we always know the exact set of
what's matched */
invert = 1;
/* FALLTHROUGH */
case POSIXA:
my_invlist = invlist_clone(PL_Posix_ptrs[FLAGS(scan)], NULL);
goto join_posix_and_ascii;
case NPOSIXD:
case NPOSIXU:
invert = 1;
/* FALLTHROUGH */
case POSIXD:
case POSIXU:
my_invlist = invlist_clone(PL_XPosix_ptrs[FLAGS(scan)], NULL);
/* NPOSIXD matches all upper Latin1 code points unless the
* target string being matched is UTF-8, which is
* unknowable until match time. Since we are going to
* invert, we want to get rid of all of them so that the
* inversion will match all */
if (OP(scan) == NPOSIXD) {
_invlist_subtract(my_invlist, PL_UpperLatin1,
&my_invlist);
}
join_posix_and_ascii:
if (flags & SCF_DO_STCLASS_AND) {
ssc_intersection(data->start_class, my_invlist, invert);
ssc_clear_locale(data->start_class);
}
else {
assert(flags & SCF_DO_STCLASS_OR);
ssc_union(data->start_class, my_invlist, invert);
}
SvREFCNT_dec(my_invlist);
}
if (flags & SCF_DO_STCLASS_OR)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (PL_regkind[OP(scan)] == EOL && flags & SCF_DO_SUBSTR) {
data->flags |= (OP(scan) == MEOL
? SF_BEFORE_MEOL
: SF_BEFORE_SEOL);
scan_commit(pRExC_state, data, minlenp, is_inf);
}
else if ( PL_regkind[OP(scan)] == BRANCHJ
/* Lookbehind, or need to calculate parens/evals/stclass: */
&& (scan->flags || data || (flags & SCF_DO_STCLASS))
&& (OP(scan) == IFMATCH || OP(scan) == UNLESSM))
{
if ( !PERL_ENABLE_POSITIVE_ASSERTION_STUDY
|| OP(scan) == UNLESSM )
{
/* Negative Lookahead/lookbehind
In this case we can't do fixed string optimisation.
*/
SSize_t deltanext, minnext, fake = 0;
regnode *nscan;
regnode_ssc intrnl;
int f = 0;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
if ( flags & SCF_DO_STCLASS && !scan->flags
&& OP(scan) == IFMATCH ) { /* Lookahead */
ssc_init(pRExC_state, &intrnl);
data_fake.start_class = &intrnl;
f |= SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
next = regnext(scan);
nscan = NEXTOPER(NEXTOPER(scan));
/* recurse study_chunk() for lookahead body */
minnext = study_chunk(pRExC_state, &nscan, minlenp, &deltanext,
last, &data_fake, stopparen,
recursed_depth, NULL, f, depth+1,
mutate_ok);
if (scan->flags) {
if ( deltanext < 0
|| deltanext > (I32) U8_MAX
|| minnext > (I32)U8_MAX
|| minnext + deltanext > (I32)U8_MAX)
{
FAIL2("Lookbehind longer than %" UVuf " not implemented",
(UV)U8_MAX);
}
/* The 'next_off' field has been repurposed to count the
* additional starting positions to try beyond the initial
* one. (This leaves it at 0 for non-variable length
* matches to avoid breakage for those not using this
* extension) */
if (deltanext) {
scan->next_off = deltanext;
ckWARNexperimental(RExC_parse,
WARN_EXPERIMENTAL__VLB,
"Variable length lookbehind is experimental");
}
scan->flags = (U8)minnext + deltanext;
}
if (data) {
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (f & SCF_DO_STCLASS_AND) {
if (flags & SCF_DO_STCLASS_OR) {
/* OR before, AND after: ideally we would recurse with
* data_fake to get the AND applied by study of the
* remainder of the pattern, and then derecurse;
* *** HACK *** for now just treat as "no information".
* See [perl #56690].
*/
ssc_init(pRExC_state, data->start_class);
} else {
/* AND before and after: combine and continue. These
* assertions are zero-length, so can match an EMPTY
* string */
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl);
ANYOF_FLAGS(data->start_class)
|= SSC_MATCHES_EMPTY_STRING;
}
}
}
#if PERL_ENABLE_POSITIVE_ASSERTION_STUDY
else {
/* Positive Lookahead/lookbehind
In this case we can do fixed string optimisation,
but we must be careful about it. Note in the case of
lookbehind the positions will be offset by the minimum
length of the pattern, something we won't know about
until after the recurse.
*/
SSize_t deltanext, fake = 0;
regnode *nscan;
regnode_ssc intrnl;
int f = 0;
/* We use SAVEFREEPV so that when the full compile
is finished perl will clean up the allocated
minlens when it's all done. This way we don't
have to worry about freeing them when we know
they won't be used, which would be a pain.
*/
SSize_t *minnextp;
Newx( minnextp, 1, SSize_t );
SAVEFREEPV(minnextp);
if (data) {
StructCopy(data, &data_fake, scan_data_t);
if ((flags & SCF_DO_SUBSTR) && data->last_found) {
f |= SCF_DO_SUBSTR;
if (scan->flags)
scan_commit(pRExC_state, &data_fake, minlenp, is_inf);
data_fake.last_found=newSVsv(data->last_found);
}
}
else
data_fake.last_closep = &fake;
data_fake.flags = 0;
data_fake.substrs[0].flags = 0;
data_fake.substrs[1].flags = 0;
data_fake.pos_delta = delta;
if (is_inf)
data_fake.flags |= SF_IS_INF;
if ( flags & SCF_DO_STCLASS && !scan->flags
&& OP(scan) == IFMATCH ) { /* Lookahead */
ssc_init(pRExC_state, &intrnl);
data_fake.start_class = &intrnl;
f |= SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
next = regnext(scan);
nscan = NEXTOPER(NEXTOPER(scan));
/* positive lookahead study_chunk() recursion */
*minnextp = study_chunk(pRExC_state, &nscan, minnextp,
&deltanext, last, &data_fake,
stopparen, recursed_depth, NULL,
f, depth+1, mutate_ok);
if (scan->flags) {
assert(0); /* This code has never been tested since this
is normally not compiled */
if ( deltanext < 0
|| deltanext > (I32) U8_MAX
|| *minnextp > (I32)U8_MAX
|| *minnextp + deltanext > (I32)U8_MAX)
{
FAIL2("Lookbehind longer than %" UVuf " not implemented",
(UV)U8_MAX);
}
if (deltanext) {
scan->next_off = deltanext;
}
scan->flags = (U8)*minnextp + deltanext;
}
*minnextp += min;
if (f & SCF_DO_STCLASS_AND) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl);
ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING;
}
if (data) {
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
if ((flags & SCF_DO_SUBSTR) && data_fake.last_found) {
int i;
if (RExC_rx->minlen<*minnextp)
RExC_rx->minlen=*minnextp;
scan_commit(pRExC_state, &data_fake, minnextp, is_inf);
SvREFCNT_dec_NN(data_fake.last_found);
for (i = 0; i < 2; i++) {
if (data_fake.substrs[i].minlenp != minlenp) {
data->substrs[i].min_offset =
data_fake.substrs[i].min_offset;
data->substrs[i].max_offset =
data_fake.substrs[i].max_offset;
data->substrs[i].minlenp =
data_fake.substrs[i].minlenp;
data->substrs[i].lookbehind += scan->flags;
}
}
}
}
}
#endif
}
else if (OP(scan) == OPEN) {
if (stopparen != (I32)ARG(scan))
pars++;
}
else if (OP(scan) == CLOSE) {
if (stopparen == (I32)ARG(scan)) {
break;
}
if ((I32)ARG(scan) == is_par) {
next = regnext(scan);
if ( next && (OP(next) != WHILEM) && next < last)
is_par = 0; /* Disable optimization */
}
if (data)
*(data->last_closep) = ARG(scan);
}
else if (OP(scan) == EVAL) {
if (data)
data->flags |= SF_HAS_EVAL;
}
else if ( PL_regkind[OP(scan)] == ENDLIKE ) {
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
flags &= ~SCF_DO_SUBSTR;
}
if (data && OP(scan)==ACCEPT) {
data->flags |= SCF_SEEN_ACCEPT;
if (stopmin > min)
stopmin = min;
}
}
else if (OP(scan) == LOGICAL && scan->flags == 2) /* Embedded follows */
{
if (flags & SCF_DO_SUBSTR) {
scan_commit(pRExC_state, data, minlenp, is_inf);
data->cur_is_floating = 1; /* float */
}
is_inf = is_inf_internal = 1;
if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
ssc_anything(data->start_class);
flags &= ~SCF_DO_STCLASS;
}
else if (OP(scan) == GPOS) {
if (!(RExC_rx->intflags & PREGf_GPOS_FLOAT) &&
!(delta || is_inf || (data && data->pos_delta)))
{
if (!(RExC_rx->intflags & PREGf_ANCH) && (flags & SCF_DO_SUBSTR))
RExC_rx->intflags |= PREGf_ANCH_GPOS;
if (RExC_rx->gofs < (STRLEN)min)
RExC_rx->gofs = min;
} else {
RExC_rx->intflags |= PREGf_GPOS_FLOAT;
RExC_rx->gofs = 0;
}
}
#ifdef TRIE_STUDY_OPT
#ifdef FULL_TRIE_STUDY
else if (PL_regkind[OP(scan)] == TRIE) {
/* NOTE - There is similar code to this block above for handling
BRANCH nodes on the initial study. If you change stuff here
check there too. */
regnode *trie_node= scan;
regnode *tail= regnext(scan);
reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
SSize_t max1 = 0, min1 = SSize_t_MAX;
regnode_ssc accum;
if (flags & SCF_DO_SUBSTR) { /* XXXX Add !SUSPEND? */
/* Cannot merge strings after this. */
scan_commit(pRExC_state, data, minlenp, is_inf);
}
if (flags & SCF_DO_STCLASS)
ssc_init_zero(pRExC_state, &accum);
if (!trie->jump) {
min1= trie->minlen;
max1= trie->maxlen;
} else {
const regnode *nextbranch= NULL;
U32 word;
for ( word=1 ; word <= trie->wordcount ; word++)
{
SSize_t deltanext=0, minnext=0, f = 0, fake;
regnode_ssc this_class;
StructCopy(&zero_scan_data, &data_fake, scan_data_t);
if (data) {
data_fake.whilem_c = data->whilem_c;
data_fake.last_closep = data->last_closep;
}
else
data_fake.last_closep = &fake;
data_fake.pos_delta = delta;
if (flags & SCF_DO_STCLASS) {
ssc_init(pRExC_state, &this_class);
data_fake.start_class = &this_class;
f = SCF_DO_STCLASS_AND;
}
if (flags & SCF_WHILEM_VISITED_POS)
f |= SCF_WHILEM_VISITED_POS;
if (trie->jump[word]) {
if (!nextbranch)
nextbranch = trie_node + trie->jump[0];
scan= trie_node + trie->jump[word];
/* We go from the jump point to the branch that follows
it. Note this means we need the vestigial unused
branches even though they aren't otherwise used. */
/* optimise study_chunk() for TRIE */
minnext = study_chunk(pRExC_state, &scan, minlenp,
&deltanext, (regnode *)nextbranch, &data_fake,
stopparen, recursed_depth, NULL, f, depth+1,
mutate_ok);
}
if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH)
nextbranch= regnext((regnode*)nextbranch);
if (min1 > (SSize_t)(minnext + trie->minlen))
min1 = minnext + trie->minlen;
if (deltanext == SSize_t_MAX) {
is_inf = is_inf_internal = 1;
max1 = SSize_t_MAX;
} else if (max1 < (SSize_t)(minnext + deltanext + trie->maxlen))
max1 = minnext + deltanext + trie->maxlen;
if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR))
pars++;
if (data_fake.flags & SCF_SEEN_ACCEPT) {
if ( stopmin > min + min1)
stopmin = min + min1;
flags &= ~SCF_DO_SUBSTR;
if (data)
data->flags |= SCF_SEEN_ACCEPT;
}
if (data) {
if (data_fake.flags & SF_HAS_EVAL)
data->flags |= SF_HAS_EVAL;
data->whilem_c = data_fake.whilem_c;
}
if (flags & SCF_DO_STCLASS)
ssc_or(pRExC_state, &accum, (regnode_charclass *) &this_class);
}
}
if (flags & SCF_DO_SUBSTR) {
data->pos_min += min1;
data->pos_delta += max1 - min1;
if (max1 != min1 || is_inf)
data->cur_is_floating = 1; /* float */
}
min += min1;
if (delta != SSize_t_MAX) {
if (SSize_t_MAX - (max1 - min1) >= delta)
delta += max1 - min1;
else
delta = SSize_t_MAX;
}
if (flags & SCF_DO_STCLASS_OR) {
ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &accum);
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
flags &= ~SCF_DO_STCLASS;
}
}
else if (flags & SCF_DO_STCLASS_AND) {
if (min1) {
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum);
flags &= ~SCF_DO_STCLASS;
}
else {
/* Switch to OR mode: cache the old value of
* data->start_class */
INIT_AND_WITHP;
StructCopy(data->start_class, and_withp, regnode_ssc);
flags &= ~SCF_DO_STCLASS_AND;
StructCopy(&accum, data->start_class, regnode_ssc);
flags |= SCF_DO_STCLASS_OR;
}
}
scan= tail;
continue;
}
#else
else if (PL_regkind[OP(scan)] == TRIE) {
reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
U8*bang=NULL;
min += trie->minlen;
delta += (trie->maxlen - trie->minlen);
flags &= ~SCF_DO_STCLASS; /* xxx */
if (flags & SCF_DO_SUBSTR) {
/* Cannot expect anything... */
scan_commit(pRExC_state, data, minlenp, is_inf);
data->pos_min += trie->minlen;
data->pos_delta += (trie->maxlen - trie->minlen);
if (trie->maxlen != trie->minlen)
data->cur_is_floating = 1; /* float */
}
if (trie->jump) /* no more substrings -- for now /grr*/
flags &= ~SCF_DO_SUBSTR;
}
#endif /* old or new */
#endif /* TRIE_STUDY_OPT */
/* Else: zero-length, ignore. */
scan = regnext(scan);
}
finish:
if (frame) {
/* we need to unwind recursion. */
depth = depth - 1;
DEBUG_STUDYDATA("frame-end", data, depth, is_inf);
DEBUG_PEEP("fend", scan, depth, flags);
/* restore previous context */
last = frame->last_regnode;
scan = frame->next_regnode;
stopparen = frame->stopparen;
recursed_depth = frame->prev_recursed_depth;
RExC_frame_last = frame->prev_frame;
frame = frame->this_prev_frame;
goto fake_study_recurse;
}
assert(!frame);
DEBUG_STUDYDATA("pre-fin", data, depth, is_inf);
*scanp = scan;
*deltap = is_inf_internal ? SSize_t_MAX : delta;
if (flags & SCF_DO_SUBSTR && is_inf)
data->pos_delta = SSize_t_MAX - data->pos_min;
if (is_par > (I32)U8_MAX)
is_par = 0;
if (is_par && pars==1 && data) {
data->flags |= SF_IN_PAR;
data->flags &= ~SF_HAS_PAR;
}
else if (pars && data) {
data->flags |= SF_HAS_PAR;
data->flags &= ~SF_IN_PAR;
}
if (flags & SCF_DO_STCLASS_OR)
ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp);
if (flags & SCF_TRIE_RESTUDY)
data->flags |= SCF_TRIE_RESTUDY;
DEBUG_STUDYDATA("post-fin", data, depth, is_inf);
{
SSize_t final_minlen= min < stopmin ? min : stopmin;
if (!(RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN)) {
if (final_minlen > SSize_t_MAX - delta)
RExC_maxlen = SSize_t_MAX;
else if (RExC_maxlen < final_minlen + delta)
RExC_maxlen = final_minlen + delta;
}
return final_minlen;
}
NOT_REACHED; /* NOTREACHED */
|
7575865277439549502998615595514633684
|
regcomp.c
|
6878494453235899109541655885780527455
|
CWE-120
|
CVE-2020-12723
|
regcomp.c in Perl before 5.30.3 allows a buffer overflow via a crafted regular expression because of recursive S_study_chunk calls.
|
https://nvd.nist.gov/vuln/detail/CVE-2020-12723
|
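Aside on the record above (not part of the dataset): the mutate_ok flag threaded through every recursive study_chunk() call gates each in-place regnode rewrite (the EXACTFU_S_EDGE, ANYOFM, CURLYN and CURLYM conversions), and per the CVE description the overflow stems from recursive S_study_chunk calls. The general defensive pattern is to forbid in-place mutation during re-entrant analysis passes. A minimal sketch of that pattern, with all names hypothetical:
/* Minimal sketch of guarded in-place mutation during recursive
 * analysis; hypothetical names, not Perl's regcomp API. */
#include <stdbool.h>
#include <stddef.h>
typedef struct node {
    int op;
    struct node *next;
} node;
enum { OP_GENERIC = 1, OP_SPECIALISED = 2 };
/* Walk a node chain, optionally rewriting nodes to a cheaper
 * opcode. Re-entrant walks of shared sub-chains must pass
 * mutate_ok = false: a second visit could otherwise rewrite a
 * node whose layout the first visit still depends on. */
static size_t analyse(node *n, bool mutate_ok)
{
    size_t visited = 0;
    for (; n != NULL; n = n->next) {
        if (n->op == OP_GENERIC && mutate_ok)
            n->op = OP_SPECIALISED;   /* in-place optimisation */
        visited++;
    }
    return visited;
}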
215,312
|
linux-2.6
|
9926e4c74300c4b31dee007298c6475d33369df0
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=9926e4c74300c4b31dee007298c6475d33369df0
|
CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix
As discovered here today, the change in Kernel 2.6.17 intended to inhibit
users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by
"cheating" and setting it to 1 in such a case, does not make a difference,
as the check is done in the wrong place (too late), and only applies to the
profiling code.
On all systems I checked running kernels above 2.6.17, no matter what the
hard and soft CPU time limits were before, a user could escape them by
issuing "ulimit -t 0" in the shell (sh/bash/zsh), and then the user's
process was never killed.
Attached is a trivial patch to fix that. Simply moving the check to a
slightly earlier location (specifically, before the line that actually
assigns the limit, *old_rlim = new_rlim) does the trick.
Do note that at least the zsh (but not ash, dash, or bash) shell has the
problem of "caching" the limits set by the ulimit command, so when running
zsh the fix will not immediately be evident - after entering "ulimit -t 0",
"ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual
limit as returned by getrlimit(...) will be 1. It can be verified by
opening a subshell (which will not have the values of the parent shell in
cache) and checking in it, or just by running a CPU intensive command like
"echo '65536^1048576' | bc" and verifying that it dumps core after one
second.
Regardless of whether that is a misfeature in the shell, perhaps it would
be better to return -EINVAL from setrlimit in such a case instead of
cheating and setting to 1, as that does not really reflect the actual state
of the process anymore. I do not however know what the ground for that
decision was in the original 2.6.17 change, and whether there would be any
"backward" compatibility issues, so I preferred not to touch that right
now.
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
| 1
|
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
struct rlimit new_rlim, *old_rlim;
unsigned long it_prof_secs;
int retval;
if (resource >= RLIM_NLIMITS)
return -EINVAL;
if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
if (new_rlim.rlim_cur > new_rlim.rlim_max)
return -EINVAL;
old_rlim = current->signal->rlim + resource;
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!capable(CAP_SYS_RESOURCE))
return -EPERM;
if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
return -EPERM;
retval = security_task_setrlimit(resource, &new_rlim);
if (retval)
return retval;
task_lock(current->group_leader);
*old_rlim = new_rlim;
task_unlock(current->group_leader);
if (resource != RLIMIT_CPU)
goto out;
/*
* RLIMIT_CPU handling. Note that the kernel fails to return an error
* code if it rejected the user's attempt to set RLIMIT_CPU. This is a
* very long-standing error, and fixing it now risks breakage of
* applications, so we live with it
*/
if (new_rlim.rlim_cur == RLIM_INFINITY)
goto out;
it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
unsigned long rlim_cur = new_rlim.rlim_cur;
cputime_t cputime;
if (rlim_cur == 0) {
/*
* The caller is asking for an immediate RLIMIT_CPU
* expiry. But we use the zero value to mean "it was
* never set". So let's cheat and make it one second
* instead
*/
rlim_cur = 1;
}
cputime = secs_to_cputime(rlim_cur);
read_lock(&tasklist_lock);
spin_lock_irq(&current->sighand->siglock);
set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
spin_unlock_irq(&current->sighand->siglock);
read_unlock(&tasklist_lock);
}
out:
return 0;
}
|
308291960942000103777374680863829169927
|
None
|
CWE-20
|
CVE-2008-1294
|
Linux kernel 2.6.17, and other versions before 2.6.22, does not check when a user attempts to set RLIMIT_CPU to 0 until after the change is made, which allows local users to bypass intended resource limits.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-1294
|
|
487,618
|
linux-2.6
|
9926e4c74300c4b31dee007298c6475d33369df0
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=9926e4c74300c4b31dee007298c6475d33369df0
|
CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix
As discovered here today, the change in Kernel 2.6.17 intended to inhibit
users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by
"cheating" and setting it to 1 in such a case, does not make a difference,
as the check is done in the wrong place (too late), and only applies to the
profiling code.
On all systems I checked running kernels above 2.6.17, no matter what the
hard and soft CPU time limits were before, a user could escape them by
issuing "ulimit -t 0" in the shell (sh/bash/zsh), and then the user's
process was never killed.
Attached is a trivial patch to fix that. Simply moving the check to a
slightly earlier location (specifically, before the line that actually
assigns the limit, *old_rlim = new_rlim) does the trick.
Do note that at least the zsh (but not ash, dash, or bash) shell has the
problem of "caching" the limits set by the ulimit command, so when running
zsh the fix will not immediately be evident - after entering "ulimit -t 0",
"ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual
limit as returned by getrlimit(...) will be 1. It can be verified by
opening a subshell (which will not have the values of the parent shell in
cache) and checking in it, or just by running a CPU intensive command like
"echo '65536^1048576' | bc" and verifying that it dumps core after one
second.
Regardless of whether that is a misfeature in the shell, perhaps it would
be better to return -EINVAL from setrlimit in such a case instead of
cheating and setting to 1, as that does not really reflect the actual state
of the process anymore. I do not however know what the ground for that
decision was in the original 2.6.17 change, and whether there would be any
"backward" compatibility issues, so I preferred not to touch that right
now.
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
| 0
|
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
struct rlimit new_rlim, *old_rlim;
unsigned long it_prof_secs;
int retval;
if (resource >= RLIM_NLIMITS)
return -EINVAL;
if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
if (new_rlim.rlim_cur > new_rlim.rlim_max)
return -EINVAL;
old_rlim = current->signal->rlim + resource;
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!capable(CAP_SYS_RESOURCE))
return -EPERM;
if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
return -EPERM;
retval = security_task_setrlimit(resource, &new_rlim);
if (retval)
return retval;
if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
/*
* The caller is asking for an immediate RLIMIT_CPU
* expiry. But we use the zero value to mean "it was
* never set". So let's cheat and make it one second
* instead
*/
new_rlim.rlim_cur = 1;
}
task_lock(current->group_leader);
*old_rlim = new_rlim;
task_unlock(current->group_leader);
if (resource != RLIMIT_CPU)
goto out;
/*
* RLIMIT_CPU handling. Note that the kernel fails to return an error
* code if it rejected the user's attempt to set RLIMIT_CPU. This is a
* very long-standing error, and fixing it now risks breakage of
* applications, so we live with it
*/
if (new_rlim.rlim_cur == RLIM_INFINITY)
goto out;
it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
unsigned long rlim_cur = new_rlim.rlim_cur;
cputime_t cputime;
cputime = secs_to_cputime(rlim_cur);
read_lock(&tasklist_lock);
spin_lock_irq(&current->sighand->siglock);
set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
spin_unlock_irq(&current->sighand->siglock);
read_unlock(&tasklist_lock);
}
out:
return 0;
}
|
187909105803248299994402329971280049548
|
None
|
CWE-20
|
CVE-2008-1294
|
Linux kernel 2.6.17, and other versions before 2.6.22, does not check when a user attempts to set RLIMIT_CPU to 0 until after the change is made, which allows local users to bypass intended resource limits.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-1294
|
|
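An illustrative aside, not part of either record above: the bypass the commit message describes can be probed from userspace with a few lines of C. On a fixed kernel the soft limit reads back as 1 (the commit's "cheat" value) and the loop below is killed after roughly one second of CPU time; on a vulnerable kernel the stored limit stays 0 and the loop runs unbounded.
/* Probe for the setrlimit(RLIMIT_CPU, 0) behaviour; sketch only. */
#include <stdio.h>
#include <sys/resource.h>
int main(void)
{
    struct rlimit rl;
    volatile unsigned long spin = 0;
    if (getrlimit(RLIMIT_CPU, &rl) != 0) { perror("getrlimit"); return 1; }
    rl.rlim_cur = 0;                      /* the "ulimit -t 0" case */
    if (setrlimit(RLIMIT_CPU, &rl) != 0) { perror("setrlimit"); return 1; }
    if (getrlimit(RLIMIT_CPU, &rl) != 0) { perror("getrlimit"); return 1; }
    /* A fixed kernel stores the cheat value, so this prints 1;
     * a vulnerable kernel stores 0 and never arms the CPU timer. */
    printf("RLIMIT_CPU soft limit now: %llu\n",
           (unsigned long long)rl.rlim_cur);
    for (;;)            /* burn CPU until SIGXCPU/SIGKILL fires */
        spin++;
}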
215,374
|
linux-2.6
|
328fc47ea0bcc27d9afa69c3ad6e52431cadd76c
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=328fc47ea0bcc27d9afa69c3ad6e52431cadd76c
|
sctp: correct bounds check in sctp_setsockopt_auth_key
The bounds check to prevent buffer overflow was not exactly
right. It still allowed an overflow of up to 8 bytes, which is
sizeof(struct sctp_authkey).
Since optlen is already checked against the size of that struct,
we are guaranteed not to cause integer overflow either.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
| 1
|
static int sctp_setsockopt_auth_key(struct sock *sk,
char __user *optval,
int optlen)
{
struct sctp_authkey *authkey;
struct sctp_association *asoc;
int ret;
if (!sctp_auth_enable)
return -EACCES;
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
authkey = kmalloc(optlen, GFP_KERNEL);
if (!authkey)
return -ENOMEM;
if (copy_from_user(authkey, optval, optlen)) {
ret = -EFAULT;
goto out;
}
if (authkey->sca_keylength > optlen) {
ret = -EINVAL;
goto out;
}
asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
ret = -EINVAL;
goto out;
}
ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
out:
kfree(authkey);
return ret;
}
|
46314924383394303871242635983086014836
|
None
|
CWE-189
|
CVE-2008-3526
|
Integer overflow in the sctp_setsockopt_auth_key function in net/sctp/socket.c in the Stream Control Transmission Protocol (sctp) implementation in the Linux kernel 2.6.24-rc1 through 2.6.26.3 allows remote attackers to cause a denial of service (panic) or possibly have unspecified other impact via a crafted sca_keylength field associated with the SCTP_AUTH_KEY option.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-3526
|
|
488,919
|
linux-2.6
|
328fc47ea0bcc27d9afa69c3ad6e52431cadd76c
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=328fc47ea0bcc27d9afa69c3ad6e52431cadd76c
|
sctp: correct bounds check in sctp_setsockopt_auth_key
The bounds check to prevent buffer overflow was not exactly
right. It still allowed an overflow of up to 8 bytes, which is
sizeof(struct sctp_authkey).
Since optlen is already checked against the size of that struct,
we are guaranteed not to cause integer overflow either.
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
| 0
|
static int sctp_setsockopt_auth_key(struct sock *sk,
char __user *optval,
int optlen)
{
struct sctp_authkey *authkey;
struct sctp_association *asoc;
int ret;
if (!sctp_auth_enable)
return -EACCES;
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
authkey = kmalloc(optlen, GFP_KERNEL);
if (!authkey)
return -ENOMEM;
if (copy_from_user(authkey, optval, optlen)) {
ret = -EFAULT;
goto out;
}
if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
ret = -EINVAL;
goto out;
}
asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
ret = -EINVAL;
goto out;
}
ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
out:
kfree(authkey);
return ret;
}
|
298167600420793747403712897523979919986
|
None
|
CWE-189
|
CVE-2008-3526
|
Integer overflow in the sctp_setsockopt_auth_key function in net/sctp/socket.c in the Stream Control Transmission Protocol (sctp) implementation in the Linux kernel 2.6.24-rc1 through 2.6.26.3 allows remote attackers to cause a denial of service (panic) or possibly have unspecified other impact via a crafted sca_keylength field associated with the SCTP_AUTH_KEY option.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-3526
|
|
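The two records above differ only in the bounds check, and the general rule is worth stating once: a length field carried inside a header must be validated against the payload capacity (the total buffer size minus the header size), not against the total size, or the consumer can be driven up to sizeof(header) bytes past the end of the buffer. A minimal sketch with hypothetical names:
/* Sketch of the corrected bounds-check pattern; hypothetical types. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
struct msg_hdr {
    uint16_t key_len;   /* claims how many payload bytes follow */
    uint8_t  key[];     /* payload bytes */
};
/* Caller guarantees total_len > sizeof(struct msg_hdr), mirroring
 * the optlen check in the records above, so the subtraction cannot
 * wrap. The buggy form compared key_len against total_len alone,
 * permitting an overrun of up to sizeof(struct msg_hdr) bytes. */
static bool key_len_ok(const struct msg_hdr *h, size_t total_len)
{
    return h->key_len <= total_len - sizeof(struct msg_hdr);
}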
215,391
|
linux-2.6
|
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=ba0166708ef4da7eeb61dd92bbba4d5a749d6561
|
sctp: Fix kernel panic while processing a protocol violation parameter
Since sctp_sf_abort_violation() needs its 'arg' parameter to be of type
'struct sctp_chunk', it reads the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() is
always called with a 'struct sctp_paramhdr' parameter, which is then
passed on to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes the problem. It also fixes two places that called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
| 1
|
static sctp_disposition_t sctp_sf_violation_paramlen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands) {
static const char err_str[] = "The following parameter had invalid length:";
return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
|
31315788610604386259282762063646605709
|
None
|
CWE-20
|
CVE-2008-4618
|
The Stream Control Transmission Protocol (sctp) implementation in the Linux kernel before 2.6.27 does not properly handle a protocol violation in which a parameter has an invalid length, which allows attackers to cause a denial of service (panic) via unspecified vectors, related to sctp_sf_violation_paramlen, sctp_sf_abort_violation, sctp_make_abort_violation, and incorrect data types in function calls.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-4618
|
|
489,124
|
linux-2.6
|
ba0166708ef4da7eeb61dd92bbba4d5a749d6561
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=ba0166708ef4da7eeb61dd92bbba4d5a749d6561
|
sctp: Fix kernel panic while processing a protocol violation parameter
Since sctp_sf_abort_violation() needs its 'arg' parameter to be of type
'struct sctp_chunk', it reads the chunk type and chunk length from
the chunk_hdr member of the chunk. But sctp_sf_violation_paramlen() is
always called with a 'struct sctp_paramhdr' parameter, which is then
passed on to sctp_sf_abort_violation(). This may cause a kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixes the problem. It also fixes two places that called
sctp_sf_violation_paramlen() with the wrong parameter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
| 0
|
static sctp_disposition_t sctp_sf_violation_chunklen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[]="The following chunk had invalid length:";
return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
|
307853403397128900302576220100500126991
|
None
|
CWE-20
|
CVE-2008-4618
|
The Stream Control Transmission Protocol (sctp) implementation in the Linux kernel before 2.6.27 does not properly handle a protocol violation in which a parameter has an invalid length, which allows attackers to cause a denial of service (panic) via unspecified vectors, related to sctp_sf_violation_paramlen, sctp_sf_abort_violation, sctp_make_abort_violation, and incorrect data types in function calls.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-4618
|
|
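An illustrative aside on the two records above: the panic comes from a void * argument erasing the type mismatch between caller and callee, so the compiler cannot object. A minimal sketch of the hazard, with hypothetical types:
/* Sketch of void*-based type confusion; hypothetical types. */
#include <stdio.h>
struct chunk    { int type; int len; const char *body; };
struct paramhdr { short type; short len; };
/* Assumes arg points at a struct chunk; handing it a struct
 * paramhdr instead reads past the smaller object, which is the
 * class of bug the commit above fixes. */
static void report(void *arg)
{
    struct chunk *c = arg;
    printf("chunk type %d len %d\n", c->type, c->len);
}
int main(void)
{
    struct paramhdr p = { 1, 8 };
    /* report(&p) compiles cleanly because void * erases the
     * mismatch; at run time it would read beyond 'p'. */
    (void)p;
    (void)report;
    return 0;
}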
215,399
|
linux-2.6
|
efc7ffcb4237f8cb9938909041c4ed38f6e1bf40
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=efc7ffcb4237f8cb9938909041c4ed38f6e1bf40
|
hfsplus: fix Buffer overflow with a corrupted image
When an hfsplus image gets corrupted it might happen that the catalog
namelength field gets b0rked. If we mount such an image the memcpy() in
hfsplus_cat_build_key_uni() writes more than the 255 that fit in the name
field. Depending on the size of the overwritten data, we either only get
memory corruption or also trigger an oops like this:
[ 221.628020] BUG: unable to handle kernel paging request at c82b0000
[ 221.629066] IP: [<c022d4b1>] hfsplus_find_cat+0x10d/0x151
[ 221.629066] *pde = 0ea29163 *pte = 082b0160
[ 221.629066] Oops: 0002 [#1] PREEMPT DEBUG_PAGEALLOC
[ 221.629066] Modules linked in:
[ 221.629066]
[ 221.629066] Pid: 4845, comm: mount Not tainted (2.6.27-rc4-00123-gd3ee1b4-dirty #28)
[ 221.629066] EIP: 0060:[<c022d4b1>] EFLAGS: 00010206 CPU: 0
[ 221.629066] EIP is at hfsplus_find_cat+0x10d/0x151
[ 221.629066] EAX: 00000029 EBX: 00016210 ECX: 000042c2 EDX: 00000002
[ 221.629066] ESI: c82d70ca EDI: c82b0000 EBP: c82d1bcc ESP: c82d199c
[ 221.629066] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068
[ 221.629066] Process mount (pid: 4845, ti=c82d1000 task=c8224060 task.ti=c82d1000)
[ 221.629066] Stack: c080b3c4 c82aa8f8 c82d19c2 00016210 c080b3be c82d1bd4 c82aa8f0 00000300
[ 221.629066] 01000000 750008b1 74006e00 74006900 65006c00 c82d6400 c013bd35 c8224060
[ 221.629066] 00000036 00000046 c82d19f0 00000082 c8224548 c8224060 00000036 c0d653cc
[ 221.629066] Call Trace:
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c01302d2>] ? __kernel_text_address+0x1b/0x27
[ 221.629066] [<c010487a>] ? dump_trace+0xca/0xd6
[ 221.629066] [<c0109e32>] ? save_stack_address+0x0/0x2c
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c013b571>] ? save_trace+0x37/0x8d
[ 221.629066] [<c013b62e>] ? add_lock_to_list+0x67/0x8d
[ 221.629066] [<c013ea1c>] ? validate_chain+0x8a4/0x9f4
[ 221.629066] [<c013553d>] ? down+0xc/0x2f
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c013da5d>] ? mark_held_locks+0x43/0x5a
[ 221.629066] [<c013dc3a>] ? trace_hardirqs_on+0xb/0xd
[ 221.629066] [<c013dbf4>] ? trace_hardirqs_on_caller+0xf4/0x12f
[ 221.629066] [<c06abec8>] ? _spin_unlock_irqrestore+0x42/0x58
[ 221.629066] [<c013555c>] ? down+0x2b/0x2f
[ 221.629066] [<c022aa68>] ? hfsplus_iget+0xa0/0x154
[ 221.629066] [<c022b0b9>] ? hfsplus_fill_super+0x280/0x447
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c041c9e4>] ? string+0x2b/0x74
[ 221.629066] [<c041cd16>] ? vsnprintf+0x2e9/0x512
[ 221.629066] [<c010487a>] ? dump_trace+0xca/0xd6
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c013b571>] ? save_trace+0x37/0x8d
[ 221.629066] [<c013b62e>] ? add_lock_to_list+0x67/0x8d
[ 221.629066] [<c013ea1c>] ? validate_chain+0x8a4/0x9f4
[ 221.629066] [<c01354d3>] ? up+0xc/0x2f
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c041cfb7>] ? snprintf+0x1b/0x1d
[ 221.629066] [<c01ba466>] ? disk_name+0x25/0x67
[ 221.629066] [<c0183960>] ? get_sb_bdev+0xcd/0x10b
[ 221.629066] [<c016ad92>] ? kstrdup+0x2a/0x4c
[ 221.629066] [<c022a7b3>] ? hfsplus_get_sb+0x13/0x15
[ 221.629066] [<c022ae39>] ? hfsplus_fill_super+0x0/0x447
[ 221.629066] [<c0183583>] ? vfs_kern_mount+0x3b/0x76
[ 221.629066] [<c0183602>] ? do_kern_mount+0x32/0xba
[ 221.629066] [<c01960d4>] ? do_new_mount+0x46/0x74
[ 221.629066] [<c0196277>] ? do_mount+0x175/0x193
[ 221.629066] [<c013dbf4>] ? trace_hardirqs_on_caller+0xf4/0x12f
[ 221.629066] [<c01663b2>] ? __get_free_pages+0x1e/0x24
[ 221.629066] [<c06ac07b>] ? lock_kernel+0x19/0x8c
[ 221.629066] [<c01962e6>] ? sys_mount+0x51/0x9b
[ 221.629066] [<c01962f9>] ? sys_mount+0x64/0x9b
[ 221.629066] [<c01038bd>] ? sysenter_do_call+0x12/0x31
[ 221.629066] =======================
[ 221.629066] Code: 89 c2 c1 e2 08 c1 e8 08 09 c2 8b 85 e8 fd ff ff 66 89 50 06 89 c7 53 83 c7 08 56 57 68 c4 b3 80 c0 e8 8c 5c ef ff 89 d9 c1 e9 02 <f3> a5 89 d9 83 e1 03 74 02 f3 a4 83 c3 06 8b 95 e8 fd ff ff 0f
[ 221.629066] EIP: [<c022d4b1>] hfsplus_find_cat+0x10d/0x151 SS:ESP 0068:c82d199c
[ 221.629066] ---[ end trace e417a1d67f0d0066 ]---
Since hfsplus_cat_build_key_uni() returns void and only has one callsite,
the check is performed at the callsite.
Signed-off-by: Eric Sesterhenn <[email protected]>
Reviewed-by: Pekka Enberg <[email protected]>
Cc: Roman Zippel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
| 1
|
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd)
{
hfsplus_cat_entry tmp;
int err;
u16 type;
hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
if (err)
return err;
type = be16_to_cpu(tmp.type);
if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
printk(KERN_ERR "hfs: found bad thread record in catalog\n");
return -EIO;
}
hfsplus_cat_build_key_uni(fd->search_key, be32_to_cpu(tmp.thread.parentID),
&tmp.thread.nodeName);
return hfs_brec_find(fd);
}
|
215695318616116738125696937571179709955
|
None
|
CWE-119
|
CVE-2008-4933
|
Buffer overflow in the hfsplus_find_cat function in fs/hfsplus/catalog.c in the Linux kernel before 2.6.28-rc1 allows attackers to cause a denial of service (memory corruption or system crash) via an hfsplus filesystem image with an invalid catalog namelength field, related to the hfsplus_cat_build_key_uni function.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-4933
|
|
489,215
|
linux-2.6
|
efc7ffcb4237f8cb9938909041c4ed38f6e1bf40
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=efc7ffcb4237f8cb9938909041c4ed38f6e1bf40
|
hfsplus: fix Buffer overflow with a corrupted image
When an hfsplus image gets corrupted it might happen that the catalog
namelength field gets b0rked. If we mount such an image the memcpy() in
hfsplus_cat_build_key_uni() writes more than the 255 that fit in the name
field. Depending on the size of the overwritten data, we either only get
memory corruption or also trigger an oops like this:
[ 221.628020] BUG: unable to handle kernel paging request at c82b0000
[ 221.629066] IP: [<c022d4b1>] hfsplus_find_cat+0x10d/0x151
[ 221.629066] *pde = 0ea29163 *pte = 082b0160
[ 221.629066] Oops: 0002 [#1] PREEMPT DEBUG_PAGEALLOC
[ 221.629066] Modules linked in:
[ 221.629066]
[ 221.629066] Pid: 4845, comm: mount Not tainted (2.6.27-rc4-00123-gd3ee1b4-dirty #28)
[ 221.629066] EIP: 0060:[<c022d4b1>] EFLAGS: 00010206 CPU: 0
[ 221.629066] EIP is at hfsplus_find_cat+0x10d/0x151
[ 221.629066] EAX: 00000029 EBX: 00016210 ECX: 000042c2 EDX: 00000002
[ 221.629066] ESI: c82d70ca EDI: c82b0000 EBP: c82d1bcc ESP: c82d199c
[ 221.629066] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068
[ 221.629066] Process mount (pid: 4845, ti=c82d1000 task=c8224060 task.ti=c82d1000)
[ 221.629066] Stack: c080b3c4 c82aa8f8 c82d19c2 00016210 c080b3be c82d1bd4 c82aa8f0 00000300
[ 221.629066] 01000000 750008b1 74006e00 74006900 65006c00 c82d6400 c013bd35 c8224060
[ 221.629066] 00000036 00000046 c82d19f0 00000082 c8224548 c8224060 00000036 c0d653cc
[ 221.629066] Call Trace:
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c01302d2>] ? __kernel_text_address+0x1b/0x27
[ 221.629066] [<c010487a>] ? dump_trace+0xca/0xd6
[ 221.629066] [<c0109e32>] ? save_stack_address+0x0/0x2c
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c013b571>] ? save_trace+0x37/0x8d
[ 221.629066] [<c013b62e>] ? add_lock_to_list+0x67/0x8d
[ 221.629066] [<c013ea1c>] ? validate_chain+0x8a4/0x9f4
[ 221.629066] [<c013553d>] ? down+0xc/0x2f
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c013da5d>] ? mark_held_locks+0x43/0x5a
[ 221.629066] [<c013dc3a>] ? trace_hardirqs_on+0xb/0xd
[ 221.629066] [<c013dbf4>] ? trace_hardirqs_on_caller+0xf4/0x12f
[ 221.629066] [<c06abec8>] ? _spin_unlock_irqrestore+0x42/0x58
[ 221.629066] [<c013555c>] ? down+0x2b/0x2f
[ 221.629066] [<c022aa68>] ? hfsplus_iget+0xa0/0x154
[ 221.629066] [<c022b0b9>] ? hfsplus_fill_super+0x280/0x447
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c041c9e4>] ? string+0x2b/0x74
[ 221.629066] [<c041cd16>] ? vsnprintf+0x2e9/0x512
[ 221.629066] [<c010487a>] ? dump_trace+0xca/0xd6
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c0109eaf>] ? save_stack_trace+0x1c/0x3a
[ 221.629066] [<c013b571>] ? save_trace+0x37/0x8d
[ 221.629066] [<c013b62e>] ? add_lock_to_list+0x67/0x8d
[ 221.629066] [<c013ea1c>] ? validate_chain+0x8a4/0x9f4
[ 221.629066] [<c01354d3>] ? up+0xc/0x2f
[ 221.629066] [<c013f1f6>] ? __lock_acquire+0x68a/0x6e0
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c013bca3>] ? trace_hardirqs_off_caller+0x14/0x9b
[ 221.629066] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[ 221.629066] [<c0107aa3>] ? native_sched_clock+0x82/0x96
[ 221.629066] [<c041cfb7>] ? snprintf+0x1b/0x1d
[ 221.629066] [<c01ba466>] ? disk_name+0x25/0x67
[ 221.629066] [<c0183960>] ? get_sb_bdev+0xcd/0x10b
[ 221.629066] [<c016ad92>] ? kstrdup+0x2a/0x4c
[ 221.629066] [<c022a7b3>] ? hfsplus_get_sb+0x13/0x15
[ 221.629066] [<c022ae39>] ? hfsplus_fill_super+0x0/0x447
[ 221.629066] [<c0183583>] ? vfs_kern_mount+0x3b/0x76
[ 221.629066] [<c0183602>] ? do_kern_mount+0x32/0xba
[ 221.629066] [<c01960d4>] ? do_new_mount+0x46/0x74
[ 221.629066] [<c0196277>] ? do_mount+0x175/0x193
[ 221.629066] [<c013dbf4>] ? trace_hardirqs_on_caller+0xf4/0x12f
[ 221.629066] [<c01663b2>] ? __get_free_pages+0x1e/0x24
[ 221.629066] [<c06ac07b>] ? lock_kernel+0x19/0x8c
[ 221.629066] [<c01962e6>] ? sys_mount+0x51/0x9b
[ 221.629066] [<c01962f9>] ? sys_mount+0x64/0x9b
[ 221.629066] [<c01038bd>] ? sysenter_do_call+0x12/0x31
[ 221.629066] =======================
[ 221.629066] Code: 89 c2 c1 e2 08 c1 e8 08 09 c2 8b 85 e8 fd ff ff 66 89 50 06 89 c7 53 83 c7 08 56 57 68 c4 b3 80 c0 e8 8c 5c ef ff 89 d9 c1 e9 02 <f3> a5 89 d9 83 e1 03 74 02 f3 a4 83 c3 06 8b 95 e8 fd ff ff 0f
[ 221.629066] EIP: [<c022d4b1>] hfsplus_find_cat+0x10d/0x151 SS:ESP 0068:c82d199c
[ 221.629066] ---[ end trace e417a1d67f0d0066 ]---
Since hfsplus_cat_build_key_uni() returns void and only has one callsite,
the check is performed at the callsite.
Signed-off-by: Eric Sesterhenn <[email protected]>
Reviewed-by: Pekka Enberg <[email protected]>
Cc: Roman Zippel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
| 0
|
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd)
{
hfsplus_cat_entry tmp;
int err;
u16 type;
hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
if (err)
return err;
type = be16_to_cpu(tmp.type);
if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
printk(KERN_ERR "hfs: found bad thread record in catalog\n");
return -EIO;
}
if (be16_to_cpu(tmp.thread.nodeName.length) > 255) {
printk(KERN_ERR "hfs: catalog name length corrupted\n");
return -EIO;
}
hfsplus_cat_build_key_uni(fd->search_key, be32_to_cpu(tmp.thread.parentID),
&tmp.thread.nodeName);
return hfs_brec_find(fd);
}
|
296624566166126910302983685635029664818
|
None
|
CWE-119
|
CVE-2008-4933
|
Buffer overflow in the hfsplus_find_cat function in fs/hfsplus/catalog.c in the Linux kernel before 2.6.28-rc1 allows attackers to cause a denial of service (memory corruption or system crash) via an hfsplus filesystem image with an invalid catalog namelength field, related to the hfsplus_cat_build_key_uni function.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-4933
|
|
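For illustration, the essence of this fix is bounding an attacker-controlled on-disk length field before it drives a copy. Below is a minimal userspace sketch of that pattern; fake_unistr and build_key_checked are invented stand-ins for the kernel's hfsplus types and hfsplus_cat_build_key_uni(), not the real definitions.

/* Minimal userspace sketch of the namelength validation added by this
 * commit. The struct and helpers are simplified stand-ins for the
 * kernel's hfsplus types, not the real definitions. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohs()/htons() stand in for be16_to_cpu() etc. */

#define HFSPLUS_MAX_STRLEN 255

struct fake_unistr {
    uint16_t length;               /* big-endian on disk */
    uint16_t unicode[HFSPLUS_MAX_STRLEN];
};

/* Returns 0 on success, -1 if the on-disk length field is corrupted.
 * Copying before this check is what let a crafted image overflow the
 * destination key buffer in the vulnerable kernel. */
static int build_key_checked(const struct fake_unistr *name,
                             uint16_t *dst, size_t dst_len)
{
    uint16_t len = ntohs(name->length);
    if (len > HFSPLUS_MAX_STRLEN || len > dst_len)
        return -1;                 /* reject, mirroring the -EIO path */
    memcpy(dst, name->unicode, len * sizeof(uint16_t));
    return 0;
}

int main(void)
{
    struct fake_unistr evil = { .length = htons(40000) };
    uint16_t key[HFSPLUS_MAX_STRLEN];
    printf("crafted image: %s\n",
           build_key_checked(&evil, key, HFSPLUS_MAX_STRLEN) ?
           "rejected" : "accepted");
    return 0;
}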
215,400
|
linux-2.6
|
649f1ee6c705aab644035a7998d7b574193a598a
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=649f1ee6c705aab644035a7998d7b574193a598a
|
hfsplus: check read_mapping_page() return value
While testing more corrupted images with hfsplus, I came across
one which triggered the following bug:
[15840.675016] BUG: unable to handle kernel paging request at fffffffb
[15840.675016] IP: [<c0116a4f>] kmap+0x15/0x56
[15840.675016] *pde = 00008067 *pte = 00000000
[15840.675016] Oops: 0000 [#1] PREEMPT DEBUG_PAGEALLOC
[15840.675016] Modules linked in:
[15840.675016]
[15840.675016] Pid: 11575, comm: ln Not tainted (2.6.27-rc4-00123-gd3ee1b4-dirty #29)
[15840.675016] EIP: 0060:[<c0116a4f>] EFLAGS: 00010202 CPU: 0
[15840.675016] EIP is at kmap+0x15/0x56
[15840.675016] EAX: 00000246 EBX: fffffffb ECX: 00000000 EDX: cab919c0
[15840.675016] ESI: 000007dd EDI: cab0bcf4 EBP: cab0bc98 ESP: cab0bc94
[15840.675016] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068
[15840.675016] Process ln (pid: 11575, ti=cab0b000 task=cab919c0 task.ti=cab0b000)
[15840.675016] Stack: 00000000 cab0bcdc c0231cfb 00000000 cab0bce0 00000800 ca9290c0 fffffffb
[15840.675016] cab145d0 cab919c0 cab15998 22222222 22222222 22222222 00000001 cab15960
[15840.675016] 000007dd cab0bcf4 cab0bd04 c022cb3a cab0bcf4 cab15a6c ca9290c0 00000000
[15840.675016] Call Trace:
[15840.675016] [<c0231cfb>] ? hfsplus_block_allocate+0x6f/0x2d3
[15840.675016] [<c022cb3a>] ? hfsplus_file_extend+0xc4/0x1db
[15840.675016] [<c022ce41>] ? hfsplus_get_block+0x8c/0x19d
[15840.675016] [<c06adde4>] ? sub_preempt_count+0x9d/0xab
[15840.675016] [<c019ece6>] ? __block_prepare_write+0x147/0x311
[15840.675016] [<c0161934>] ? __grab_cache_page+0x52/0x73
[15840.675016] [<c019ef4f>] ? block_write_begin+0x79/0xd5
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c019f22a>] ? cont_write_begin+0x27f/0x2af
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c0139ebe>] ? tick_program_event+0x28/0x4c
[15840.675016] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[15840.675016] [<c022b723>] ? hfsplus_write_begin+0x2d/0x32
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c0161988>] ? pagecache_write_begin+0x33/0x107
[15840.675016] [<c01879e5>] ? __page_symlink+0x3c/0xae
[15840.675016] [<c019ad34>] ? __mark_inode_dirty+0x12f/0x137
[15840.675016] [<c0187a70>] ? page_symlink+0x19/0x1e
[15840.675016] [<c022e6eb>] ? hfsplus_symlink+0x41/0xa6
[15840.675016] [<c01886a9>] ? vfs_symlink+0x99/0x101
[15840.675016] [<c018a2f6>] ? sys_symlinkat+0x6b/0xad
[15840.675016] [<c018a348>] ? sys_symlink+0x10/0x12
[15840.675016] [<c01038bd>] ? sysenter_do_call+0x12/0x31
[15840.675016] =======================
[15840.675016] Code: 00 00 75 10 83 3d 88 2f ec c0 02 75 07 89 d0 e8 12 56 05 00 5d c3 55 ba 06 00 00 00 89 e5 53 89 c3 b8 3d eb 7e c0 e8 16 74 00 00 <8b> 03 c1 e8 1e 69 c0 d8 02 00 00 05 b8 69 8e c0 2b 80 c4 02 00
[15840.675016] EIP: [<c0116a4f>] kmap+0x15/0x56 SS:ESP 0068:cab0bc94
[15840.675016] ---[ end trace 4fea40dad6b70e5f ]---
This happens because the return value of read_mapping_page() is passed on
to kmap unchecked. The bug is triggered after the first
read_mapping_page() in hfsplus_block_allocate(); this patch fixes all
three usages in this function but leaves the ones further down in the
file unchanged.
Signed-off-by: Eric Sesterhenn <[email protected]>
Cc: Roman Zippel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
| 1
|
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
struct page *page;
struct address_space *mapping;
__be32 *pptr, *curr, *end;
u32 mask, start, len, n;
__be32 val;
int i;
len = *max;
if (!len)
return size;
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
offset &= ~(PAGE_CACHE_BITS - 1);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
/* scan the first partial u32 for zero bits */
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = (1U << 31) >> i;
for (; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
/* scan complete u32s for the first zero bit */
while (1) {
while (curr < end) {
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = 1 << 31;
for (i = 0; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
}
kunmap(page);
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
curr = pptr = kmap(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
}
dprint(DBG_BITMAP, "bitmap full\n");
start = size;
goto out;
found:
start = offset + (curr - pptr) * 32 + i;
if (start >= size) {
dprint(DBG_BITMAP, "bitmap full\n");
goto out;
}
/* do any partial u32 at the start */
len = min(size - start, len);
while (1) {
n |= mask;
if (++i >= 32)
break;
mask >>= 1;
if (!--len || n & mask)
goto done;
}
if (!--len)
goto done;
*curr++ = cpu_to_be32(n);
/* do full u32s */
while (1) {
while (curr < end) {
n = be32_to_cpu(*curr);
if (len < 32)
goto last;
if (n) {
len = 32;
goto last;
}
*curr++ = cpu_to_be32(0xffffffff);
len -= 32;
}
set_page_dirty(page);
kunmap(page);
offset += PAGE_CACHE_BITS;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
last:
/* do any partial u32 at end */
mask = 1U << 31;
for (i = 0; i < len; i++) {
if (n & mask)
break;
n |= mask;
mask >>= 1;
}
done:
*curr = cpu_to_be32(n);
set_page_dirty(page);
kunmap(page);
*max = offset + (curr - pptr) * 32 + i - start;
HFSPLUS_SB(sb).free_blocks -= *max;
sb->s_dirt = 1;
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
return start;
}
|
83147584987349782496711287280520974541
|
None
|
CWE-20
|
CVE-2008-4934
|
The hfsplus_block_allocate function in fs/hfsplus/bitmap.c in the Linux kernel before 2.6.28-rc1 does not check a certain return value from the read_mapping_page function before calling kmap, which allows attackers to cause a denial of service (system crash) via a crafted hfsplus filesystem image.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-4934
|
|
489,221
|
linux-2.6
|
649f1ee6c705aab644035a7998d7b574193a598a
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6
|
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=649f1ee6c705aab644035a7998d7b574193a598a
|
hfsplus: check read_mapping_page() return value
While testing more corrupted images with hfsplus, I came across
one which triggered the following bug:
[15840.675016] BUG: unable to handle kernel paging request at fffffffb
[15840.675016] IP: [<c0116a4f>] kmap+0x15/0x56
[15840.675016] *pde = 00008067 *pte = 00000000
[15840.675016] Oops: 0000 [#1] PREEMPT DEBUG_PAGEALLOC
[15840.675016] Modules linked in:
[15840.675016]
[15840.675016] Pid: 11575, comm: ln Not tainted (2.6.27-rc4-00123-gd3ee1b4-dirty #29)
[15840.675016] EIP: 0060:[<c0116a4f>] EFLAGS: 00010202 CPU: 0
[15840.675016] EIP is at kmap+0x15/0x56
[15840.675016] EAX: 00000246 EBX: fffffffb ECX: 00000000 EDX: cab919c0
[15840.675016] ESI: 000007dd EDI: cab0bcf4 EBP: cab0bc98 ESP: cab0bc94
[15840.675016] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068
[15840.675016] Process ln (pid: 11575, ti=cab0b000 task=cab919c0 task.ti=cab0b000)
[15840.675016] Stack: 00000000 cab0bcdc c0231cfb 00000000 cab0bce0 00000800 ca9290c0 fffffffb
[15840.675016] cab145d0 cab919c0 cab15998 22222222 22222222 22222222 00000001 cab15960
[15840.675016] 000007dd cab0bcf4 cab0bd04 c022cb3a cab0bcf4 cab15a6c ca9290c0 00000000
[15840.675016] Call Trace:
[15840.675016] [<c0231cfb>] ? hfsplus_block_allocate+0x6f/0x2d3
[15840.675016] [<c022cb3a>] ? hfsplus_file_extend+0xc4/0x1db
[15840.675016] [<c022ce41>] ? hfsplus_get_block+0x8c/0x19d
[15840.675016] [<c06adde4>] ? sub_preempt_count+0x9d/0xab
[15840.675016] [<c019ece6>] ? __block_prepare_write+0x147/0x311
[15840.675016] [<c0161934>] ? __grab_cache_page+0x52/0x73
[15840.675016] [<c019ef4f>] ? block_write_begin+0x79/0xd5
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c019f22a>] ? cont_write_begin+0x27f/0x2af
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c0139ebe>] ? tick_program_event+0x28/0x4c
[15840.675016] [<c013bd35>] ? trace_hardirqs_off+0xb/0xd
[15840.675016] [<c022b723>] ? hfsplus_write_begin+0x2d/0x32
[15840.675016] [<c022cdb5>] ? hfsplus_get_block+0x0/0x19d
[15840.675016] [<c0161988>] ? pagecache_write_begin+0x33/0x107
[15840.675016] [<c01879e5>] ? __page_symlink+0x3c/0xae
[15840.675016] [<c019ad34>] ? __mark_inode_dirty+0x12f/0x137
[15840.675016] [<c0187a70>] ? page_symlink+0x19/0x1e
[15840.675016] [<c022e6eb>] ? hfsplus_symlink+0x41/0xa6
[15840.675016] [<c01886a9>] ? vfs_symlink+0x99/0x101
[15840.675016] [<c018a2f6>] ? sys_symlinkat+0x6b/0xad
[15840.675016] [<c018a348>] ? sys_symlink+0x10/0x12
[15840.675016] [<c01038bd>] ? sysenter_do_call+0x12/0x31
[15840.675016] =======================
[15840.675016] Code: 00 00 75 10 83 3d 88 2f ec c0 02 75 07 89 d0 e8 12 56 05 00 5d c3 55 ba 06 00 00 00 89 e5 53 89 c3 b8 3d eb 7e c0 e8 16 74 00 00 <8b> 03 c1 e8 1e 69 c0 d8 02 00 00 05 b8 69 8e c0 2b 80 c4 02 00
[15840.675016] EIP: [<c0116a4f>] kmap+0x15/0x56 SS:ESP 0068:cab0bc94
[15840.675016] ---[ end trace 4fea40dad6b70e5f ]---
This happens because the return value of read_mapping_page() is passed on
to kmap unchecked. The bug is triggered after the first
read_mapping_page() in hfsplus_block_allocate(); this patch fixes all
three usages in this function but leaves the ones further down in the
file unchanged.
Signed-off-by: Eric Sesterhenn <[email protected]>
Cc: Roman Zippel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
| 0
|
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
struct page *page;
struct address_space *mapping;
__be32 *pptr, *curr, *end;
u32 mask, start, len, n;
__be32 val;
int i;
len = *max;
if (!len)
return size;
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
if (IS_ERR(page)) {
start = size;
goto out;
}
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
offset &= ~(PAGE_CACHE_BITS - 1);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
/* scan the first partial u32 for zero bits */
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = (1U << 31) >> i;
for (; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
/* scan complete u32s for the first zero bit */
while (1) {
while (curr < end) {
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = 1 << 31;
for (i = 0; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
}
kunmap(page);
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
if (IS_ERR(page)) {
start = size;
goto out;
}
curr = pptr = kmap(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
}
dprint(DBG_BITMAP, "bitmap full\n");
start = size;
goto out;
found:
start = offset + (curr - pptr) * 32 + i;
if (start >= size) {
dprint(DBG_BITMAP, "bitmap full\n");
goto out;
}
/* do any partial u32 at the start */
len = min(size - start, len);
while (1) {
n |= mask;
if (++i >= 32)
break;
mask >>= 1;
if (!--len || n & mask)
goto done;
}
if (!--len)
goto done;
*curr++ = cpu_to_be32(n);
/* do full u32s */
while (1) {
while (curr < end) {
n = be32_to_cpu(*curr);
if (len < 32)
goto last;
if (n) {
len = 32;
goto last;
}
*curr++ = cpu_to_be32(0xffffffff);
len -= 32;
}
set_page_dirty(page);
kunmap(page);
offset += PAGE_CACHE_BITS;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
if (IS_ERR(page)) {
start = size;
goto out;
}
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
last:
/* do any partial u32 at end */
mask = 1U << 31;
for (i = 0; i < len; i++) {
if (n & mask)
break;
n |= mask;
mask >>= 1;
}
done:
*curr = cpu_to_be32(n);
set_page_dirty(page);
kunmap(page);
*max = offset + (curr - pptr) * 32 + i - start;
HFSPLUS_SB(sb).free_blocks -= *max;
sb->s_dirt = 1;
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
return start;
}
|
221598202771346823073737014328154892672
|
None
|
CWE-20
|
CVE-2008-4934
|
The hfsplus_block_allocate function in fs/hfsplus/bitmap.c in the Linux kernel before 2.6.28-rc1 does not check a certain return value from the read_mapping_page function before calling kmap, which allows attackers to cause a denial of service (system crash) via a crafted hfsplus filesystem image.
|
https://nvd.nist.gov/vuln/detail/CVE-2008-4934
|
|
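The fix hinges on the kernel convention that read_mapping_page() encodes failures in the returned pointer itself, which kmap() would otherwise dereference. A minimal userspace sketch of that convention follows; the macros mirror include/linux/err.h and fake_read_page() is a hypothetical stand-in for read_mapping_page().

/* Userspace sketch of the kernel's ERR_PTR convention that the fix
 * relies on: a failed read returns an error encoded in the pointer,
 * which must be tested with IS_ERR() before any use. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int page_data = 42;

static void *fake_read_page(int fail)
{
    return fail ? ERR_PTR(-EIO) : &page_data;
}

int main(void)
{
    for (int fail = 0; fail <= 1; fail++) {
        void *page = fake_read_page(fail);
        if (IS_ERR(page)) {   /* the check the fix adds before kmap() */
            printf("read failed: %ld\n", PTR_ERR(page));
            continue;
        }
        printf("page value: %d\n", *(int *)page);
    }
    return 0;
}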
215,549
|
gzip
|
a3db5806d012082b9e25cc36d09f19cd736a468f
|
http://git.savannah.gnu.org/cgit/gzip
|
http://git.savannah.gnu.org/cgit/gzip.git/commit/?id=a3db5806d012082b9e25cc36d09f19cd736a468f
|
gzip -d: do not clobber stack for valid input on x86_64
* unlzw.c (unlzw): Avoid integer overflow.
Aki Helin reported the segfault along with an input to trigger the bug.
* NEWS (Bug fixes): Mention it.
| 1
|
int unlzw(in, out)
int in, out; /* input and output file descriptors */
{
REG2 char_type *stackp;
REG3 code_int code;
REG4 int finchar;
REG5 code_int oldcode;
REG6 code_int incode;
REG7 long inbits;
REG8 long posbits;
REG9 int outpos;
/* REG10 int insize; (global) */
REG11 unsigned bitmask;
REG12 code_int free_ent;
REG13 code_int maxcode;
REG14 code_int maxmaxcode;
REG15 int n_bits;
REG16 int rsize;
#ifdef MAXSEG_64K
tab_prefix[0] = tab_prefix0;
tab_prefix[1] = tab_prefix1;
#endif
maxbits = get_byte();
block_mode = maxbits & BLOCK_MODE;
if ((maxbits & LZW_RESERVED) != 0) {
WARN((stderr, "\n%s: %s: warning, unknown flags 0x%x\n",
program_name, ifname, maxbits & LZW_RESERVED));
}
maxbits &= BIT_MASK;
maxmaxcode = MAXCODE(maxbits);
if (maxbits > BITS) {
fprintf(stderr,
"\n%s: %s: compressed with %d bits, can only handle %d bits\n",
program_name, ifname, maxbits, BITS);
exit_code = ERROR;
return ERROR;
}
rsize = insize;
maxcode = MAXCODE(n_bits = INIT_BITS)-1;
bitmask = (1<<n_bits)-1;
oldcode = -1;
finchar = 0;
outpos = 0;
posbits = inptr<<3;
free_ent = ((block_mode) ? FIRST : 256);
clear_tab_prefixof(); /* Initialize the first 256 entries in the table. */
for (code = 255 ; code >= 0 ; --code) {
tab_suffixof(code) = (char_type)code;
}
do {
REG1 int i;
int e;
int o;
resetbuf:
e = insize-(o = (posbits>>3));
for (i = 0 ; i < e ; ++i) {
inbuf[i] = inbuf[i+o];
}
insize = e;
posbits = 0;
if (insize < INBUF_EXTRA) {
rsize = read_buffer (in, (char *) inbuf + insize, INBUFSIZ);
if (rsize == -1) {
read_error();
}
insize += rsize;
bytes_in += (off_t)rsize;
}
inbits = ((rsize != 0) ? ((long)insize - insize%n_bits)<<3 :
((long)insize<<3)-(n_bits-1));
while (inbits > posbits) {
if (free_ent > maxcode) {
posbits = ((posbits-1) +
((n_bits<<3)-(posbits-1+(n_bits<<3))%(n_bits<<3)));
++n_bits;
if (n_bits == maxbits) {
maxcode = maxmaxcode;
} else {
maxcode = MAXCODE(n_bits)-1;
}
bitmask = (1<<n_bits)-1;
goto resetbuf;
}
input(inbuf,posbits,code,n_bits,bitmask);
Tracev((stderr, "%d ", code));
if (oldcode == -1) {
if (256 <= code)
gzip_error ("corrupt input.");
outbuf[outpos++] = (char_type)(finchar = (int)(oldcode=code));
continue;
}
if (code == CLEAR && block_mode) {
clear_tab_prefixof();
free_ent = FIRST - 1;
posbits = ((posbits-1) +
((n_bits<<3)-(posbits-1+(n_bits<<3))%(n_bits<<3)));
maxcode = MAXCODE(n_bits = INIT_BITS)-1;
bitmask = (1<<n_bits)-1;
goto resetbuf;
}
incode = code;
stackp = de_stack;
if (code >= free_ent) { /* Special case for KwKwK string. */
if (code > free_ent) {
#ifdef DEBUG
char_type *p;
posbits -= n_bits;
p = &inbuf[posbits>>3];
fprintf(stderr,
"code:%ld free_ent:%ld n_bits:%d insize:%u\n",
code, free_ent, n_bits, insize);
fprintf(stderr,
"posbits:%ld inbuf:%02X %02X %02X %02X %02X\n",
posbits, p[-1],p[0],p[1],p[2],p[3]);
#endif
if (!test && outpos > 0) {
write_buf(out, (char*)outbuf, outpos);
bytes_out += (off_t)outpos;
}
gzip_error (to_stdout
? "corrupt input."
: "corrupt input. Use zcat to recover some data.");
}
*--stackp = (char_type)finchar;
code = oldcode;
}
while ((cmp_code_int)code >= (cmp_code_int)256) {
/* Generate output characters in reverse order */
*--stackp = tab_suffixof(code);
code = tab_prefixof(code);
}
*--stackp = (char_type)(finchar = tab_suffixof(code));
/* And put them out in forward order */
{
REG1 int i;
if (outpos+(i = (de_stack-stackp)) >= OUTBUFSIZ) {
do {
if (i > OUTBUFSIZ-outpos) i = OUTBUFSIZ-outpos;
if (i > 0) {
memcpy(outbuf+outpos, stackp, i);
outpos += i;
}
if (outpos >= OUTBUFSIZ) {
if (!test) {
write_buf(out, (char*)outbuf, outpos);
bytes_out += (off_t)outpos;
}
outpos = 0;
}
stackp+= i;
} while ((i = (de_stack-stackp)) > 0);
} else {
memcpy(outbuf+outpos, stackp, i);
outpos += i;
}
}
if ((code = free_ent) < maxmaxcode) { /* Generate the new entry. */
tab_prefixof(code) = (unsigned short)oldcode;
tab_suffixof(code) = (char_type)finchar;
free_ent = code+1;
}
oldcode = incode; /* Remember previous code. */
}
} while (rsize != 0);
if (!test && outpos > 0) {
write_buf(out, (char*)outbuf, outpos);
bytes_out += (off_t)outpos;
}
return OK;
}
|
315113145353857602270748585965585984173
|
None
|
CWE-189
|
CVE-2010-0001
|
Integer underflow in the unlzw function in unlzw.c in gzip before 1.4 on 64-bit platforms, as used in ncompress and probably others, allows remote attackers to cause a denial of service (application crash) or possibly execute arbitrary code via a crafted archive that uses LZW compression, leading to an array index error.
|
https://nvd.nist.gov/vuln/detail/CVE-2010-0001
|
|
491,979
|
gzip
|
a3db5806d012082b9e25cc36d09f19cd736a468f
|
http://git.savannah.gnu.org/cgit/gzip
|
http://git.savannah.gnu.org/cgit/gzip.git/commit/?id=a3db5806d012082b9e25cc36d09f19cd736a468f
|
gzip -d: do not clobber stack for valid input on x86_64
* unlzw.c (unlzw): Avoid integer overflow.
Aki Helin reported the segfault along with an input to trigger the bug.
* NEWS (Bug fixes): Mention it.
| 0
|
int unlzw(in, out)
int in, out; /* input and output file descriptors */
{
REG2 char_type *stackp;
REG3 code_int code;
REG4 int finchar;
REG5 code_int oldcode;
REG6 code_int incode;
REG7 long inbits;
REG8 long posbits;
REG9 int outpos;
/* REG10 int insize; (global) */
REG11 unsigned bitmask;
REG12 code_int free_ent;
REG13 code_int maxcode;
REG14 code_int maxmaxcode;
REG15 int n_bits;
REG16 int rsize;
#ifdef MAXSEG_64K
tab_prefix[0] = tab_prefix0;
tab_prefix[1] = tab_prefix1;
#endif
maxbits = get_byte();
block_mode = maxbits & BLOCK_MODE;
if ((maxbits & LZW_RESERVED) != 0) {
WARN((stderr, "\n%s: %s: warning, unknown flags 0x%x\n",
program_name, ifname, maxbits & LZW_RESERVED));
}
maxbits &= BIT_MASK;
maxmaxcode = MAXCODE(maxbits);
if (maxbits > BITS) {
fprintf(stderr,
"\n%s: %s: compressed with %d bits, can only handle %d bits\n",
program_name, ifname, maxbits, BITS);
exit_code = ERROR;
return ERROR;
}
rsize = insize;
maxcode = MAXCODE(n_bits = INIT_BITS)-1;
bitmask = (1<<n_bits)-1;
oldcode = -1;
finchar = 0;
outpos = 0;
posbits = inptr<<3;
free_ent = ((block_mode) ? FIRST : 256);
clear_tab_prefixof(); /* Initialize the first 256 entries in the table. */
for (code = 255 ; code >= 0 ; --code) {
tab_suffixof(code) = (char_type)code;
}
do {
REG1 int i;
int e;
int o;
resetbuf:
o = posbits >> 3;
e = o <= insize ? insize - o : 0;
for (i = 0 ; i < e ; ++i) {
inbuf[i] = inbuf[i+o];
}
insize = e;
posbits = 0;
if (insize < INBUF_EXTRA) {
rsize = read_buffer (in, (char *) inbuf + insize, INBUFSIZ);
if (rsize == -1) {
read_error();
}
insize += rsize;
bytes_in += (off_t)rsize;
}
inbits = ((rsize != 0) ? ((long)insize - insize%n_bits)<<3 :
((long)insize<<3)-(n_bits-1));
while (inbits > posbits) {
if (free_ent > maxcode) {
posbits = ((posbits-1) +
((n_bits<<3)-(posbits-1+(n_bits<<3))%(n_bits<<3)));
++n_bits;
if (n_bits == maxbits) {
maxcode = maxmaxcode;
} else {
maxcode = MAXCODE(n_bits)-1;
}
bitmask = (1<<n_bits)-1;
goto resetbuf;
}
input(inbuf,posbits,code,n_bits,bitmask);
Tracev((stderr, "%d ", code));
if (oldcode == -1) {
if (256 <= code)
gzip_error ("corrupt input.");
outbuf[outpos++] = (char_type)(finchar = (int)(oldcode=code));
continue;
}
if (code == CLEAR && block_mode) {
clear_tab_prefixof();
free_ent = FIRST - 1;
posbits = ((posbits-1) +
((n_bits<<3)-(posbits-1+(n_bits<<3))%(n_bits<<3)));
maxcode = MAXCODE(n_bits = INIT_BITS)-1;
bitmask = (1<<n_bits)-1;
goto resetbuf;
}
incode = code;
stackp = de_stack;
if (code >= free_ent) { /* Special case for KwKwK string. */
if (code > free_ent) {
#ifdef DEBUG
char_type *p;
posbits -= n_bits;
p = &inbuf[posbits>>3];
fprintf(stderr,
"code:%ld free_ent:%ld n_bits:%d insize:%u\n",
code, free_ent, n_bits, insize);
fprintf(stderr,
"posbits:%ld inbuf:%02X %02X %02X %02X %02X\n",
posbits, p[-1],p[0],p[1],p[2],p[3]);
#endif
if (!test && outpos > 0) {
write_buf(out, (char*)outbuf, outpos);
bytes_out += (off_t)outpos;
}
gzip_error (to_stdout
? "corrupt input."
: "corrupt input. Use zcat to recover some data.");
}
*--stackp = (char_type)finchar;
code = oldcode;
}
while ((cmp_code_int)code >= (cmp_code_int)256) {
/* Generate output characters in reverse order */
*--stackp = tab_suffixof(code);
code = tab_prefixof(code);
}
*--stackp = (char_type)(finchar = tab_suffixof(code));
/* And put them out in forward order */
{
REG1 int i;
if (outpos+(i = (de_stack-stackp)) >= OUTBUFSIZ) {
do {
if (i > OUTBUFSIZ-outpos) i = OUTBUFSIZ-outpos;
if (i > 0) {
memcpy(outbuf+outpos, stackp, i);
outpos += i;
}
if (outpos >= OUTBUFSIZ) {
if (!test) {
write_buf(out, (char*)outbuf, outpos);
bytes_out += (off_t)outpos;
}
outpos = 0;
}
stackp+= i;
} while ((i = (de_stack-stackp)) > 0);
} else {
memcpy(outbuf+outpos, stackp, i);
outpos += i;
}
}
if ((code = free_ent) < maxmaxcode) { /* Generate the new entry. */
tab_prefixof(code) = (unsigned short)oldcode;
tab_suffixof(code) = (char_type)finchar;
free_ent = code+1;
}
oldcode = incode; /* Remember previous code. */
}
} while (rsize != 0);
if (!test && outpos > 0) {
write_buf(out, (char*)outbuf, outpos);
bytes_out += (off_t)outpos;
}
return OK;
}
|
39547802204189321461886480230956054723
|
None
|
CWE-189
|
CVE-2010-0001
|
Integer underflow in the unlzw function in unlzw.c in gzip before 1.4 on 64-bit platforms, as used in ncompress and probably others, allows remote attackers to cause a denial of service (application crash) or possibly execute arbitrary code via a crafted archive that uses LZW compression, leading to an array index error.
|
https://nvd.nist.gov/vuln/detail/CVE-2010-0001
|
|
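The underflow is easiest to see in isolation: when a crafted stream advances posbits past the bytes actually held in the buffer, the byte offset o exceeds insize and "insize - o" goes negative, poisoning insize for the next read. The patched resetbuf computation clamps it to zero instead. The values below are invented for illustration.

/* Sketch of the signed-arithmetic fix in unlzw()'s resetbuf path. */
#include <stdio.h>

int main(void)
{
    int insize = 100;              /* bytes currently in inbuf */
    long posbits = 6400;           /* bit position pushed past the buffer */
    int o = (int)(posbits >> 3);   /* 800: byte offset, beyond insize */

    int e_bad = insize - o;                      /* vulnerable: -700 */
    int e_fixed = o <= insize ? insize - o : 0;  /* patched: 0 */

    printf("unclamped e = %d (poisons insize for the next read)\n", e_bad);
    printf("clamped   e = %d\n", e_fixed);
    return 0;
}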
215,921
|
grep
|
83a95bd8c8561875b948cadd417c653dbe7ef2e2
|
http://git.savannah.gnu.org/cgit/grep
|
http://git.sv.gnu.org/cgit/grep.git/commit/?id=83a95bd8c8561875b948cadd417c653dbe7ef2e2
|
grep -F: fix a heap buffer (read) overrun
grep's read buffer is often filled to its full size, except when
reading the final buffer of a file. In that case, the number of
bytes read may be far less than the size of the buffer. However, for
certain unusual pattern/text combinations, grep -F would mistakenly
examine bytes in that uninitialized region of memory when searching
for a match. With carefully chosen inputs, one can cause grep -F to
read beyond the end of that buffer altogether. This problem arose via
commit v2.18-90-g73893ff with the introduction of a more efficient
heuristic using what is now the memchr_kwset function. The use of
that function in bmexec_trans could leave TP much larger than EP,
and the subsequent call to bm_delta2_search would mistakenly access
beyond the end of the main input read buffer.
* src/kwset.c (bmexec_trans): When TP reaches or exceeds EP,
do not call bm_delta2_search.
* tests/kwset-abuse: New file.
* tests/Makefile.am (TESTS): Add it.
* THANKS.in: Update.
* NEWS (Bug fixes): Mention it.
Prior to this patch, this command would trigger a UMR:
printf %0360db 0 | valgrind src/grep -F $(printf %019dXb 0)
Use of uninitialised value of size 8
at 0x4142BE: bmexec_trans (kwset.c:657)
by 0x4143CA: bmexec (kwset.c:678)
by 0x414973: kwsexec (kwset.c:848)
by 0x414DC4: Fexecute (kwsearch.c:128)
by 0x404E2E: grepbuf (grep.c:1238)
by 0x4054BF: grep (grep.c:1417)
by 0x405CEB: grepdesc (grep.c:1645)
by 0x405EC1: grep_command_line_arg (grep.c:1692)
by 0x4077D4: main (grep.c:2570)
See the accompanying test for how to trigger the heap buffer overrun.
Thanks to Nima Aghdaii for testing and finding numerous
ways to break early iterations of this patch.
| 1
|
bmexec_trans (kwset_t kwset, char const *text, size_t size)
{
unsigned char const *d1;
char const *ep, *sp, *tp;
int d;
int len = kwset->mind;
char const *trans = kwset->trans;
if (len == 0)
return 0;
if (len > size)
return -1;
if (len == 1)
{
tp = memchr_kwset (text, size, kwset);
return tp ? tp - text : -1;
}
d1 = kwset->delta;
sp = kwset->target + len;
tp = text + len;
char gc1 = kwset->gc1;
char gc2 = kwset->gc2;
/* Significance of 12: 1 (initial offset) + 10 (skip loop) + 1 (md2). */
if (size > 12 * len)
/* 11 is not a bug, the initial offset happens only once. */
for (ep = text + size - 11 * len; tp <= ep; )
{
char const *tp0 = tp;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
/* As a heuristic, prefer memchr to seeking by
delta1 when the latter doesn't advance much. */
int advance_heuristic = 16 * sizeof (long);
if (advance_heuristic <= tp - tp0)
goto big_advance;
tp--;
tp = memchr_kwset (tp, text + size - tp, kwset);
if (! tp)
return -1;
tp++;
}
}
}
if (bm_delta2_search (&tp, ep, sp, len, trans, gc1, gc2, d1, kwset))
return tp - text;
big_advance:;
}
/* Now we have only a few characters left to search. We
carefully avoid ever producing an out-of-bounds pointer. */
ep = text + size;
d = d1[U(tp[-1])];
while (d <= ep - tp)
{
d = d1[U((tp += d)[-1])];
if (d != 0)
continue;
if (bm_delta2_search (&tp, ep, sp, len, trans, gc1, gc2, NULL, kwset))
return tp - text;
}
return -1;
}
|
221145962274501315787647767738252319422
|
None
|
CWE-119
|
CVE-2015-1345
|
The bmexec_trans function in kwset.c in grep 2.19 through 2.21 allows local users to cause a denial of service (out-of-bounds heap read and crash) via crafted input when using the -F option.
|
https://nvd.nist.gov/vuln/detail/CVE-2015-1345
|
|
497,809
|
grep
|
83a95bd8c8561875b948cadd417c653dbe7ef2e2
|
http://git.savannah.gnu.org/cgit/grep
|
http://git.sv.gnu.org/cgit/grep.git/commit/?id=83a95bd8c8561875b948cadd417c653dbe7ef2e2
|
grep -F: fix a heap buffer (read) overrun
grep's read buffer is often filled to its full size, except when
reading the final buffer of a file. In that case, the number of
bytes read may be far less than the size of the buffer. However, for
certain unusual pattern/text combinations, grep -F would mistakenly
examine bytes in that uninitialized region of memory when searching
for a match. With carefully chosen inputs, one can cause grep -F to
read beyond the end of that buffer altogether. This problem arose via
commit v2.18-90-g73893ff with the introduction of a more efficient
heuristic using what is now the memchr_kwset function. The use of
that function in bmexec_trans could leave TP much larger than EP,
and the subsequent call to bm_delta2_search would mistakenly access
beyond the end of the main input read buffer.
* src/kwset.c (bmexec_trans): When TP reaches or exceeds EP,
do not call bm_delta2_search.
* tests/kwset-abuse: New file.
* tests/Makefile.am (TESTS): Add it.
* THANKS.in: Update.
* NEWS (Bug fixes): Mention it.
Prior to this patch, this command would trigger a UMR:
printf %0360db 0 | valgrind src/grep -F $(printf %019dXb 0)
Use of uninitialised value of size 8
at 0x4142BE: bmexec_trans (kwset.c:657)
by 0x4143CA: bmexec (kwset.c:678)
by 0x414973: kwsexec (kwset.c:848)
by 0x414DC4: Fexecute (kwsearch.c:128)
by 0x404E2E: grepbuf (grep.c:1238)
by 0x4054BF: grep (grep.c:1417)
by 0x405CEB: grepdesc (grep.c:1645)
by 0x405EC1: grep_command_line_arg (grep.c:1692)
by 0x4077D4: main (grep.c:2570)
See the accompanying test for how to trigger the heap buffer overrun.
Thanks to Nima Aghdaii for testing and finding numerous
ways to break early iterations of this patch.
| 0
|
bmexec_trans (kwset_t kwset, char const *text, size_t size)
{
unsigned char const *d1;
char const *ep, *sp, *tp;
int d;
int len = kwset->mind;
char const *trans = kwset->trans;
if (len == 0)
return 0;
if (len > size)
return -1;
if (len == 1)
{
tp = memchr_kwset (text, size, kwset);
return tp ? tp - text : -1;
}
d1 = kwset->delta;
sp = kwset->target + len;
tp = text + len;
char gc1 = kwset->gc1;
char gc2 = kwset->gc2;
/* Significance of 12: 1 (initial offset) + 10 (skip loop) + 1 (md2). */
if (size > 12 * len)
/* 11 is not a bug, the initial offset happens only once. */
for (ep = text + size - 11 * len; tp <= ep; )
{
char const *tp0 = tp;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
if (d != 0)
{
d = d1[U(tp[-1])], tp += d;
d = d1[U(tp[-1])], tp += d;
/* As a heuristic, prefer memchr to seeking by
delta1 when the latter doesn't advance much. */
int advance_heuristic = 16 * sizeof (long);
if (advance_heuristic <= tp - tp0)
goto big_advance;
tp--;
tp = memchr_kwset (tp, text + size - tp, kwset);
if (! tp)
return -1;
tp++;
if (ep <= tp)
break;
}
}
}
if (bm_delta2_search (&tp, ep, sp, len, trans, gc1, gc2, d1, kwset))
return tp - text;
big_advance:;
}
/* Now we have only a few characters left to search. We
carefully avoid ever producing an out-of-bounds pointer. */
ep = text + size;
d = d1[U(tp[-1])];
while (d <= ep - tp)
{
d = d1[U((tp += d)[-1])];
if (d != 0)
continue;
if (bm_delta2_search (&tp, ep, sp, len, trans, gc1, gc2, NULL, kwset))
return tp - text;
}
return -1;
}
|
262967935690402821011816255604665485413
|
None
|
CWE-119
|
CVE-2015-1345
|
The bmexec_trans function in kwset.c in grep 2.19 through 2.21 allows local users to cause a denial of service (out-of-bounds heap read and crash) via crafted input when using the -F option.
|
https://nvd.nist.gov/vuln/detail/CVE-2015-1345
|
|
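The one-line fix ("if (ep <= tp) break;") restores the invariant that the cursor is re-validated against the end sentinel after the memchr_kwset skip, before bm_delta2_search reads around it. A small self-contained sketch of that invariant, with an invented buffer and offsets:

/* Sketch of the invariant the grep fix restores: after a skip
 * heuristic moves the cursor, it must be re-checked against the end
 * sentinel before any routine that reads around it runs. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char text[32] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
    const char *tp = text;
    const char *ep = text + 16;   /* logical end; bytes past it are "stale" */

    /* A memchr-style skip can legally land past ep... */
    tp = (const char *)memchr(tp, 'a', sizeof text - 1) + 20;

    /* ...so the patched loop re-validates before searching further. */
    if (ep <= tp)
        puts("cursor past end: stop instead of reading stale bytes");
    else
        puts("cursor in range: continue Boyer-Moore search");
    return 0;
}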
215,948
|
nettle
|
c71d2c9d20eeebb985e3872e4550137209e3ce4d
|
https://git.lysator.liu.se/nettle/nettle
|
https://git.lysator.liu.se/nettle/nettle/commit/c71d2c9d20eeebb985e3872e4550137209e3ce4d
|
Fixed miscomputation bugs in secp-256r1 modulo functions.
| 1
|
ecc_256_modp (const struct ecc_modulo *p, mp_limb_t *rp)
{
mp_limb_t u1, u0;
mp_size_t n;
n = 2*p->size;
u1 = rp[--n];
u0 = rp[n-1];
/* This is not particularly fast, but should work well with an assembly implementation. */
for (; n >= p->size; n--)
{
mp_limb_t q2, q1, q0, t, cy;
/* <q2, q1, q0> = v * u1 + <u1,u0>, with v = 2^32 - 1:
+---+---+
| u1| u0|
+---+---+
|-u1|
+-+-+-+
| u1|
+---+-+-+-+-+
| q2| q1| q0|
+---+---+---+
*/
q1 = u1 - (u1 > u0);
q0 = u0 - u1;
t = u1 << 32;
q0 += t;
t = (u1 >> 32) + (q0 < t) + 1;
q1 += t;
q2 = q1 < t;
/* Compute candidate remainder */
u1 = u0 + (q1 << 32) - q1;
t = -(mp_limb_t) (u1 > q0);
u1 -= t & 0xffffffff;
q1 += t;
q2 += t + (q1 < t);
assert (q2 < 2);
/* We multiply by two low limbs of p, 2^96 - 1, so we could use
shifts rather than mul. */
t = mpn_submul_1 (rp + n - 4, p->m, 2, q1);
t += cnd_sub_n (q2, rp + n - 3, p->m, 1);
t += (-q2) & 0xffffffff;
u0 = rp[n-2];
cy = (u0 < t);
u0 -= t;
t = (u1 < cy);
u1 -= cy;
u1 += cnd_add_n (t, rp + n - 4, p->m, 3);
u1 -= (-t) & 0xffffffff;
}
rp[2] = u0;
rp[3] = u1;
}
|
238786089108920286298904536145396627402
|
None
|
CWE-310
|
CVE-2015-8803
|
The ecc_256_modp function in ecc-256.c in Nettle before 3.2 does not properly handle carry propagation and produces incorrect output in its implementation of the P-256 NIST elliptic curve, which allows attackers to have unspecified impact via unknown vectors, a different vulnerability than CVE-2015-8805.
|
https://nvd.nist.gov/vuln/detail/CVE-2015-8803
|
|
498,160
|
nettle
|
c71d2c9d20eeebb985e3872e4550137209e3ce4d
|
https://git.lysator.liu.se/nettle/nettle
|
https://git.lysator.liu.se/nettle/nettle/commit/c71d2c9d20eeebb985e3872e4550137209e3ce4d
|
Fixed miscomputation bugs in secp-256r1 modulo functions.
| 0
|
ecc_256_modp (const struct ecc_modulo *p, mp_limb_t *rp)
{
mp_limb_t u1, u0;
mp_size_t n;
n = 2*p->size;
u1 = rp[--n];
u0 = rp[n-1];
/* This is not particularly fast, but should work well with an assembly implementation. */
for (; n >= p->size; n--)
{
mp_limb_t q2, q1, q0, t, cy;
/* <q2, q1, q0> = v * u1 + <u1,u0>, with v = 2^32 - 1:
+---+---+
| u1| u0|
+---+---+
|-u1|
+-+-+-+
| u1|
+---+-+-+-+-+
| q2| q1| q0|
+---+---+---+
*/
q1 = u1 - (u1 > u0);
q0 = u0 - u1;
t = u1 << 32;
q0 += t;
t = (u1 >> 32) + (q0 < t) + 1;
q1 += t;
q2 = q1 < t;
/* Compute candidate remainder */
u1 = u0 + (q1 << 32) - q1;
t = -(mp_limb_t) (u1 > q0);
u1 -= t & 0xffffffff;
q1 += t;
q2 += t + (q1 < t);
assert (q2 < 2);
/*
n-1 n-2 n-3 n-4
+---+---+---+---+
| u1| u0| u low |
+---+---+---+---+
- | q1(2^96-1)|
+-------+---+
|q2(2^.)|
+-------+
We multiply by two low limbs of p, 2^96 - 1, so we could use
shifts rather than mul.
*/
t = mpn_submul_1 (rp + n - 4, p->m, 2, q1);
t += cnd_sub_n (q2, rp + n - 3, p->m, 1);
t += (-q2) & 0xffffffff;
u0 = rp[n-2];
cy = (u0 < t);
u0 -= t;
t = (u1 < cy);
u1 -= cy;
cy = cnd_add_n (t, rp + n - 4, p->m, 2);
u0 += cy;
u1 += (u0 < cy);
u1 -= (-t) & 0xffffffff;
}
rp[2] = u0;
rp[3] = u1;
}
|
9762716495291847327179594432373951677
|
None
|
CWE-310
|
CVE-2015-8803
|
The ecc_256_modp function in ecc-256.c in Nettle before 3.2 does not properly handle carry propagation and produces incorrect output in its implementation of the P-256 NIST elliptic curve, which allows attackers to have unspecified impact via unknown vectors, a different vulnerability than CVE-2015-8805.
|
https://nvd.nist.gov/vuln/detail/CVE-2015-8803
|
|
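The patched code replaces the 3-limb conditional add with a 2-limb add whose carry out is explicitly folded into u0 and then u1. A toy example of why a dropped limb-to-limb carry silently corrupts results; the 8-bit "limbs" are chosen for readability and are not Nettle's mp_limb_t:

/* Tiny illustration of the carry-propagation bug class behind this
 * fix: when adding into the low limbs of a multi-limb value, the
 * carry out of each limb must be folded into the next one, or the
 * result is wrong for some inputs. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* value = limb[1]:limb[0], little-endian limbs: 0x00FF */
    uint8_t limb[2] = { 0xFF, 0x00 };

    uint8_t add = 0x01;
    uint8_t old = limb[0];
    limb[0] = (uint8_t)(limb[0] + add);
    uint8_t carry = limb[0] < old;         /* carry out of the low limb */

    limb[1] = (uint8_t)(limb[1] + carry);  /* the step that must not be dropped */

    printf("0x00FF + 0x01 = 0x%02X%02X (carry was %u)\n",
           limb[1], limb[0], carry);
    return 0;
}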
216,027
|
tar
|
cb07844454d8cc9fb21f53ace75975f91185a120
|
http://git.savannah.gnu.org/cgit/tar
|
http://git.savannah.gnu.org/cgit/tar.git/commit/?id=cb07844454d8cc9fb21f53ace75975f91185a120
|
Fix possible NULL dereference (savannah bug #55369)
* src/sparse.c (pax_decode_header): Check return from find_next_block.
| 1
|
pax_decode_header (struct tar_sparse_file *file)
{
if (file->stat_info->sparse_major > 0)
{
uintmax_t u;
char nbuf[UINTMAX_STRSIZE_BOUND];
union block *blk;
char *p;
size_t i;
off_t start;
#define COPY_BUF(b,buf,src) do \
{ \
char *endp = b->buffer + BLOCKSIZE; \
char *dst = buf; \
do \
{ \
if (dst == buf + UINTMAX_STRSIZE_BOUND -1) \
{ \
ERROR ((0, 0, _("%s: numeric overflow in sparse archive member"), \
file->stat_info->orig_file_name)); \
return false; \
} \
if (src == endp) \
{ \
set_next_block_after (b); \
b = find_next_block (); \
src = b->buffer; \
endp = b->buffer + BLOCKSIZE; \
} \
*dst = *src++; \
} \
while (*dst++ != '\n'); \
dst[-1] = 0; \
} while (0)
start = current_block_ordinal ();
set_next_block_after (current_header);
blk = find_next_block ();
p = blk->buffer;
COPY_BUF (blk,nbuf,p);
if (!decode_num (&u, nbuf, TYPE_MAXIMUM (size_t)))
{
ERROR ((0, 0, _("%s: malformed sparse archive member"),
file->stat_info->orig_file_name));
return false;
}
file->stat_info->sparse_map_size = u;
file->stat_info->sparse_map = xcalloc (file->stat_info->sparse_map_size,
sizeof (*file->stat_info->sparse_map));
file->stat_info->sparse_map_avail = 0;
for (i = 0; i < file->stat_info->sparse_map_size; i++)
{
struct sp_array sp;
COPY_BUF (blk,nbuf,p);
if (!decode_num (&u, nbuf, TYPE_MAXIMUM (off_t)))
{
ERROR ((0, 0, _("%s: malformed sparse archive member"),
file->stat_info->orig_file_name));
return false;
}
sp.offset = u;
COPY_BUF (blk,nbuf,p);
if (!decode_num (&u, nbuf, TYPE_MAXIMUM (off_t)))
{
ERROR ((0, 0, _("%s: malformed sparse archive member"),
file->stat_info->orig_file_name));
return false;
}
sp.numbytes = u;
sparse_add_map (file->stat_info, &sp);
}
set_next_block_after (blk);
file->dumped_size += BLOCKSIZE * (current_block_ordinal () - start);
}
return true;
}
|
160355660991121839913024673798590735910
|
None
|
CWE-476
|
CVE-2019-9923
|
pax_decode_header in sparse.c in GNU Tar before 1.32 had a NULL pointer dereference when parsing certain archives that have malformed extended headers.
|
https://nvd.nist.gov/vuln/detail/CVE-2019-9923
|
|
498,918
|
tar
|
cb07844454d8cc9fb21f53ace75975f91185a120
|
http://git.savannah.gnu.org/cgit/tar
|
http://git.savannah.gnu.org/cgit/tar.git/commit/?id=cb07844454d8cc9fb21f53ace75975f91185a120
|
Fix possible NULL dereference (savannah bug #55369)
* src/sparse.c (pax_decode_header): Check return from find_next_block.
| 0
|
pax_decode_header (struct tar_sparse_file *file)
{
if (file->stat_info->sparse_major > 0)
{
uintmax_t u;
char nbuf[UINTMAX_STRSIZE_BOUND];
union block *blk;
char *p;
size_t i;
off_t start;
#define COPY_BUF(b,buf,src) do \
{ \
char *endp = b->buffer + BLOCKSIZE; \
char *dst = buf; \
do \
{ \
if (dst == buf + UINTMAX_STRSIZE_BOUND -1) \
{ \
ERROR ((0, 0, _("%s: numeric overflow in sparse archive member"), \
file->stat_info->orig_file_name)); \
return false; \
} \
if (src == endp) \
{ \
set_next_block_after (b); \
b = find_next_block (); \
if (!b) \
FATAL_ERROR ((0, 0, _("Unexpected EOF in archive"))); \
src = b->buffer; \
endp = b->buffer + BLOCKSIZE; \
} \
*dst = *src++; \
} \
while (*dst++ != '\n'); \
dst[-1] = 0; \
} while (0)
start = current_block_ordinal ();
set_next_block_after (current_header);
blk = find_next_block ();
if (!blk)
FATAL_ERROR ((0, 0, _("Unexpected EOF in archive")));
p = blk->buffer;
COPY_BUF (blk,nbuf,p);
if (!decode_num (&u, nbuf, TYPE_MAXIMUM (size_t)))
{
ERROR ((0, 0, _("%s: malformed sparse archive member"),
file->stat_info->orig_file_name));
return false;
}
file->stat_info->sparse_map_size = u;
file->stat_info->sparse_map = xcalloc (file->stat_info->sparse_map_size,
sizeof (*file->stat_info->sparse_map));
file->stat_info->sparse_map_avail = 0;
for (i = 0; i < file->stat_info->sparse_map_size; i++)
{
struct sp_array sp;
COPY_BUF (blk,nbuf,p);
if (!decode_num (&u, nbuf, TYPE_MAXIMUM (off_t)))
{
ERROR ((0, 0, _("%s: malformed sparse archive member"),
file->stat_info->orig_file_name));
return false;
}
sp.offset = u;
COPY_BUF (blk,nbuf,p);
if (!decode_num (&u, nbuf, TYPE_MAXIMUM (off_t)))
{
ERROR ((0, 0, _("%s: malformed sparse archive member"),
file->stat_info->orig_file_name));
return false;
}
sp.numbytes = u;
sparse_add_map (file->stat_info, &sp);
}
set_next_block_after (blk);
file->dumped_size += BLOCKSIZE * (current_block_ordinal () - start);
}
return true;
}
|
43484957058485536945482588311169940116
|
None
|
CWE-476
|
CVE-2019-9923
|
pax_decode_header in sparse.c in GNU Tar before 1.32 had a NULL pointer dereference when parsing certain archives that have malformed extended headers.
|
https://nvd.nist.gov/vuln/detail/CVE-2019-9923
|
|
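find_next_block() returns NULL at end of archive, and the fix simply tests for that before touching blk->buffer. A compact sketch of the pattern; next_block() below is a hypothetical stand-in, not tar's real reader:

/* Sketch of the NULL-check pattern the tar fix adds: a block reader
 * that can hit EOF must have its result tested before the buffer is
 * used. */
#include <stdio.h>
#include <stdlib.h>

struct block { char buffer[512]; };

static struct block *next_block(int at_eof)
{
    static struct block b;
    return at_eof ? NULL : &b;   /* find_next_block() returns NULL at EOF */
}

int main(void)
{
    struct block *blk = next_block(1);   /* simulate a truncated archive */
    if (!blk) {
        fprintf(stderr, "Unexpected EOF in archive\n");
        return EXIT_FAILURE;             /* FATAL_ERROR in the real code */
    }
    printf("first byte: %d\n", blk->buffer[0]);
    return EXIT_SUCCESS;
}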
216,126
|
openssl
|
cca1cd9a3447dd067503e4a85ebd1679ee78a48e
|
https://github.com/openssl/openssl
|
https://github.com/openssl/openssl/commit/cca1cd9a3447dd067503e4a85ebd1679ee78a48e
|
Submitted by: Tomas Hoger <[email protected]>
Fix for CVE-2010-0433 where some Kerberos-enabled versions of OpenSSL
could be crashed if the relevant tables were not present (e.g. chrooted).
| 1
|
kssl_keytab_is_available(KSSL_CTX *kssl_ctx)
{
krb5_context krb5context = NULL;
krb5_keytab krb5keytab = NULL;
krb5_keytab_entry entry;
krb5_principal princ = NULL;
krb5_error_code krb5rc = KRB5KRB_ERR_GENERIC;
int rc = 0;
if ((krb5rc = krb5_init_context(&krb5context)))
return(0);
/* kssl_ctx->keytab_file == NULL ==> use Kerberos default
*/
if (kssl_ctx->keytab_file)
{
krb5rc = krb5_kt_resolve(krb5context, kssl_ctx->keytab_file,
&krb5keytab);
if (krb5rc)
goto exit;
}
else
{
krb5rc = krb5_kt_default(krb5context,&krb5keytab);
if (krb5rc)
goto exit;
}
/* the host key we are looking for */
krb5rc = krb5_sname_to_principal(krb5context, NULL,
kssl_ctx->service_name ? kssl_ctx->service_name: KRB5SVC,
KRB5_NT_SRV_HST, &princ);
krb5rc = krb5_kt_get_entry(krb5context, krb5keytab,
princ,
0 /* IGNORE_VNO */,
0 /* IGNORE_ENCTYPE */,
&entry);
if ( krb5rc == KRB5_KT_NOTFOUND ) {
rc = 1;
goto exit;
} else if ( krb5rc )
goto exit;
krb5_kt_free_entry(krb5context, &entry);
rc = 1;
exit:
if (krb5keytab) krb5_kt_close(krb5context, krb5keytab);
if (princ) krb5_free_principal(krb5context, princ);
if (krb5context) krb5_free_context(krb5context);
return(rc);
}
|
99893605133546740733833372677927547347
|
None
|
CWE-20
|
CVE-2010-0433
|
The kssl_keytab_is_available function in ssl/kssl.c in OpenSSL before 0.9.8n, when Kerberos is enabled but Kerberos configuration files cannot be opened, does not check a certain return value, which allows remote attackers to cause a denial of service (NULL pointer dereference and daemon crash) via SSL cipher negotiation, as demonstrated by a chroot installation of Dovecot or stunnel without Kerberos configuration files inside the chroot.
|
https://nvd.nist.gov/vuln/detail/CVE-2010-0433
|
|
500,044
|
openssl
|
cca1cd9a3447dd067503e4a85ebd1679ee78a48e
|
https://github.com/openssl/openssl
|
https://github.com/openssl/openssl/commit/cca1cd9a3447dd067503e4a85ebd1679ee78a48e
|
Submitted by: Tomas Hoger <[email protected]>
Fix for CVE-2010-0433 where some Kerberos-enabled versions of OpenSSL
could be crashed if the relevant tables were not present (e.g. chrooted).
| 0
|
kssl_keytab_is_available(KSSL_CTX *kssl_ctx)
{
krb5_context krb5context = NULL;
krb5_keytab krb5keytab = NULL;
krb5_keytab_entry entry;
krb5_principal princ = NULL;
krb5_error_code krb5rc = KRB5KRB_ERR_GENERIC;
int rc = 0;
if ((krb5rc = krb5_init_context(&krb5context)))
return(0);
/* kssl_ctx->keytab_file == NULL ==> use Kerberos default
*/
if (kssl_ctx->keytab_file)
{
krb5rc = krb5_kt_resolve(krb5context, kssl_ctx->keytab_file,
&krb5keytab);
if (krb5rc)
goto exit;
}
else
{
krb5rc = krb5_kt_default(krb5context,&krb5keytab);
if (krb5rc)
goto exit;
}
/* the host key we are looking for */
krb5rc = krb5_sname_to_principal(krb5context, NULL,
kssl_ctx->service_name ? kssl_ctx->service_name: KRB5SVC,
KRB5_NT_SRV_HST, &princ);
if (krb5rc)
goto exit;
krb5rc = krb5_kt_get_entry(krb5context, krb5keytab,
princ,
0 /* IGNORE_VNO */,
0 /* IGNORE_ENCTYPE */,
&entry);
if ( krb5rc == KRB5_KT_NOTFOUND ) {
rc = 1;
goto exit;
} else if ( krb5rc )
goto exit;
krb5_kt_free_entry(krb5context, &entry);
rc = 1;
exit:
if (krb5keytab) krb5_kt_close(krb5context, krb5keytab);
if (princ) krb5_free_principal(krb5context, princ);
if (krb5context) krb5_free_context(krb5context);
return(rc);
}
|
53603406923488065201694869344560864035
|
None
|
CWE-20
|
CVE-2010-0433
|
The kssl_keytab_is_available function in ssl/kssl.c in OpenSSL before 0.9.8n, when Kerberos is enabled but Kerberos configuration files cannot be opened, does not check a certain return value, which allows remote attackers to cause a denial of service (NULL pointer dereference and daemon crash) via SSL cipher negotiation, as demonstrated by a chroot installation of Dovecot or stunnel without Kerberos configuration files inside the chroot.
|
https://nvd.nist.gov/vuln/detail/CVE-2010-0433
|
|
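krb5_sname_to_principal() reports errors through its return code while filling an out-parameter; on failure, princ stays at the NULL it was initialised to, so skipping the return-code check turned a missing krb5.conf into a NULL dereference in the following krb5_kt_get_entry() call. A minimal sketch of that failure shape; lookup() is an invented stand-in:

/* Sketch of the failure mode: an API that returns an error code and
 * fills an out-parameter leaves that parameter NULL on failure, so a
 * skipped check becomes a NULL dereference one call later. */
#include <stdio.h>

struct principal { const char *name; };

static int lookup(int have_config, struct principal **out)
{
    *out = NULL;
    if (!have_config)
        return -1;            /* e.g. chroot without /etc/krb5.conf */
    static struct principal p = { "host/example" };
    *out = &p;
    return 0;
}

int main(void)
{
    struct principal *princ = NULL;
    int rc = lookup(0, &princ);
    if (rc) {                 /* the check the fix adds before use */
        fprintf(stderr, "principal lookup failed: %d\n", rc);
        return 1;
    }
    printf("principal: %s\n", princ->name);
    return 0;
}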
216,202
|
libssh
|
4d8420f3282ed07fc99fc5e930c17df27ef1e9b2
|
http://git.libssh.org/projects/libssh
|
http://git.libssh.org/projects/libssh.git/commit/?h=v0-5&id=4d8420f3282ed07fc99fc5e930c17df27ef1e9b2
|
sftp: Fix bug in sftp_mkdir not returning on error.
resolves: #84
(cherry picked from commit a92c97b2e17715c1b3cdd693d14af6c3311d8e44)
| 1
|
int sftp_mkdir(sftp_session sftp, const char *directory, mode_t mode) {
sftp_status_message status = NULL;
sftp_message msg = NULL;
sftp_attributes errno_attr = NULL;
struct sftp_attributes_struct attr;
ssh_buffer buffer;
ssh_string path;
uint32_t id;
buffer = ssh_buffer_new();
if (buffer == NULL) {
ssh_set_error_oom(sftp->session);
return -1;
}
path = ssh_string_from_char(directory);
if (path == NULL) {
ssh_set_error_oom(sftp->session);
ssh_buffer_free(buffer);
return -1;
}
ZERO_STRUCT(attr);
attr.permissions = mode;
attr.flags = SSH_FILEXFER_ATTR_PERMISSIONS;
id = sftp_get_new_id(sftp);
if (buffer_add_u32(buffer, id) < 0 ||
buffer_add_ssh_string(buffer, path) < 0 ||
buffer_add_attributes(buffer, &attr) < 0 ||
sftp_packet_write(sftp, SSH_FXP_MKDIR, buffer) < 0) {
ssh_buffer_free(buffer);
ssh_string_free(path);
}
ssh_buffer_free(buffer);
ssh_string_free(path);
while (msg == NULL) {
if (sftp_read_and_dispatch(sftp) < 0) {
return -1;
}
msg = sftp_dequeue(sftp, id);
}
/* By specification, this command only returns SSH_FXP_STATUS */
if (msg->packet_type == SSH_FXP_STATUS) {
status = parse_status_msg(msg);
sftp_message_free(msg);
if (status == NULL) {
return -1;
}
sftp_set_error(sftp, status->status);
switch (status->status) {
case SSH_FX_FAILURE:
/*
* mkdir always returns a failure, even if the path already exists.
* To be POSIX conformant and to be able to map it to EEXIST, a stat
* call is needed here.
*/
errno_attr = sftp_lstat(sftp, directory);
if (errno_attr != NULL) {
SAFE_FREE(errno_attr);
sftp_set_error(sftp, SSH_FX_FILE_ALREADY_EXISTS);
}
break;
case SSH_FX_OK:
status_msg_free(status);
return 0;
break;
default:
break;
}
/*
* The status should be SSH_FX_OK if the command was successful; if it
* wasn't, then there was an error
*/
ssh_set_error(sftp->session, SSH_REQUEST_DENIED,
"SFTP server: %s", status->errormsg);
status_msg_free(status);
return -1;
} else {
ssh_set_error(sftp->session, SSH_FATAL,
"Received message %d when attempting to make directory",
msg->packet_type);
sftp_message_free(msg);
}
return -1;
}
|
318072065183989103598769219049127518412
|
None
|
CWE-399
|
CVE-2012-4559
|
Multiple double free vulnerabilities in the (1) agent_sign_data function in agent.c, (2) channel_request function in channels.c, (3) ssh_userauth_pubkey function in auth.c, (4) sftp_parse_attr_3 function in sftp.c, and (5) try_publickey_from_file function in keyfiles.c in libssh before 0.5.3 allow remote attackers to cause a denial of service (crash) and possibly execute arbitrary code via unspecified vectors.
|
https://nvd.nist.gov/vuln/detail/CVE-2012-4559
|
|
500,663
|
libssh
|
4d8420f3282ed07fc99fc5e930c17df27ef1e9b2
|
http://git.libssh.org/projects/libssh
|
http://git.libssh.org/projects/libssh.git/commit/?h=v0-5&id=4d8420f3282ed07fc99fc5e930c17df27ef1e9b2
|
sftp: Fix bug in sftp_mkdir not returning on error.
resolves: #84
(cherry picked from commit a92c97b2e17715c1b3cdd693d14af6c3311d8e44)
| 0
|
int sftp_mkdir(sftp_session sftp, const char *directory, mode_t mode) {
sftp_status_message status = NULL;
sftp_message msg = NULL;
sftp_attributes errno_attr = NULL;
struct sftp_attributes_struct attr;
ssh_buffer buffer;
ssh_string path;
uint32_t id;
buffer = ssh_buffer_new();
if (buffer == NULL) {
ssh_set_error_oom(sftp->session);
return -1;
}
path = ssh_string_from_char(directory);
if (path == NULL) {
ssh_set_error_oom(sftp->session);
ssh_buffer_free(buffer);
return -1;
}
ZERO_STRUCT(attr);
attr.permissions = mode;
attr.flags = SSH_FILEXFER_ATTR_PERMISSIONS;
id = sftp_get_new_id(sftp);
if (buffer_add_u32(buffer, id) < 0 ||
buffer_add_ssh_string(buffer, path) < 0 ||
buffer_add_attributes(buffer, &attr) < 0 ||
sftp_packet_write(sftp, SSH_FXP_MKDIR, buffer) < 0) {
ssh_buffer_free(buffer);
ssh_string_free(path);
return -1;
}
ssh_buffer_free(buffer);
ssh_string_free(path);
while (msg == NULL) {
if (sftp_read_and_dispatch(sftp) < 0) {
return -1;
}
msg = sftp_dequeue(sftp, id);
}
/* By specification, this command only returns SSH_FXP_STATUS */
if (msg->packet_type == SSH_FXP_STATUS) {
status = parse_status_msg(msg);
sftp_message_free(msg);
if (status == NULL) {
return -1;
}
sftp_set_error(sftp, status->status);
switch (status->status) {
case SSH_FX_FAILURE:
/*
* mkdir always returns a failure, even if the path already exists.
* To be POSIX conformant and to be able to map it to EEXIST, a stat
* call is needed here.
*/
errno_attr = sftp_lstat(sftp, directory);
if (errno_attr != NULL) {
SAFE_FREE(errno_attr);
sftp_set_error(sftp, SSH_FX_FILE_ALREADY_EXISTS);
}
break;
case SSH_FX_OK:
status_msg_free(status);
return 0;
break;
default:
break;
}
/*
* The status should be SSH_FX_OK if the command was successful; if it
* wasn't, then there was an error
*/
ssh_set_error(sftp->session, SSH_REQUEST_DENIED,
"SFTP server: %s", status->errormsg);
status_msg_free(status);
return -1;
} else {
ssh_set_error(sftp->session, SSH_FATAL,
"Received message %d when attempting to make directory",
msg->packet_type);
sftp_message_free(msg);
}
return -1;
}
|
57484647322304387183846447984144924668
|
None
|
CWE-399
|
CVE-2012-4559
|
Multiple double free vulnerabilities in the (1) agent_sign_data function in agent.c, (2) channel_request function in channels.c, (3) ssh_userauth_pubkey function in auth.c, (4) sftp_parse_attr_3 function in sftp.c, and (5) try_publickey_from_file function in keyfiles.c in libssh before 0.5.3 allow remote attackers to cause a denial of service (crash) and possibly execute arbitrary code via unspecified vectors.
|
https://nvd.nist.gov/vuln/detail/CVE-2012-4559
|
|
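The whole patch is the added "return -1": without it, the error path frees buffer and path and then falls through to the unconditional frees below, a classic double free. A reduced sketch of the shape, with a single invented resource:

/* Sketch of the double-free shape fixed here: an error path frees
 * its resources but forgets to return, so control falls through to
 * the unconditional cleanup and frees them again. */
#include <stdio.h>
#include <stdlib.h>

static int do_request(int fail)
{
    char *buffer = malloc(64);
    if (!buffer)
        return -1;

    if (fail) {
        free(buffer);
        return -1;        /* the "return -1" the patch adds; without it,
                           * execution reaches the free below again */
    }

    /* ... use buffer ... */
    free(buffer);
    return 0;
}

int main(void)
{
    printf("request: %d\n", do_request(1));
    return 0;
}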
216,515
|
openssl
|
939b4960276b040fc0ed52232238fcc9e2e9ec21
|
https://github.com/openssl/openssl
|
https://github.com/openssl/openssl/commit/939b4960276b040fc0ed52232238fcc9e2e9ec21
|
Fix race condition in NewSessionTicket
If a NewSessionTicket is received by a multi-threaded client when
attempting to reuse a previous ticket, then a race condition can occur,
potentially leading to a double free of the ticket data.
CVE-2015-1791
This also fixes RT#3808 where a session ID is changed for a session already
in the client session cache. Since the session ID is the key to the cache,
this breaks the cache access.
Parts of this patch were inspired by this Akamai change:
https://github.com/akamai/openssl/commit/c0bf69a791239ceec64509f9f19fcafb2461b0d3
Reviewed-by: Rich Salz <[email protected]>
(cherry picked from commit 27c76b9b8010b536687318739c6f631ce4194688)
Conflicts:
ssl/ssl.h
ssl/ssl_err.c
| 1
|
int ssl3_get_new_session_ticket(SSL *s)
{
int ok, al, ret = 0, ticklen;
long n;
const unsigned char *p;
unsigned char *d;
n = s->method->ssl_get_message(s,
SSL3_ST_CR_SESSION_TICKET_A,
SSL3_ST_CR_SESSION_TICKET_B,
SSL3_MT_NEWSESSION_TICKET, 16384, &ok);
if (!ok)
return ((int)n);
if (n < 6) {
/* need at least ticket_lifetime_hint + ticket length */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, SSL_R_LENGTH_MISMATCH);
goto f_err;
}
p = d = (unsigned char *)s->init_msg;
n2l(p, s->session->tlsext_tick_lifetime_hint);
n2s(p, ticklen);
/* ticket_lifetime_hint + ticket_length + ticket */
if (ticklen + 6 != n) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, SSL_R_LENGTH_MISMATCH);
goto f_err;
}
if (s->session->tlsext_tick) {
OPENSSL_free(s->session->tlsext_tick);
s->session->tlsext_ticklen = 0;
}
s->session->tlsext_tick = OPENSSL_malloc(ticklen);
if (!s->session->tlsext_tick) {
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, ERR_R_MALLOC_FAILURE);
goto err;
}
memcpy(s->session->tlsext_tick, p, ticklen);
s->session->tlsext_ticklen = ticklen;
/*
* There are two ways to detect a resumed ticket session. One is to set
* an appropriate session ID and then the server must return a match in
* ServerHello. This allows the normal client session ID matching to work
* and we know much earlier that the ticket has been accepted. The
* other way is to set zero length session ID when the ticket is
* presented and rely on the handshake to determine session resumption.
* We choose the former approach because this fits in with assumptions
* elsewhere in OpenSSL. The session ID is set to the SHA256 (or SHA1 if
* SHA256 is disabled) hash of the ticket.
*/
EVP_Digest(p, ticklen,
s->session->session_id, &s->session->session_id_length,
# ifndef OPENSSL_NO_SHA256
EVP_sha256(), NULL);
# else
EVP_sha1(), NULL);
# endif
ret = 1;
return (ret);
f_err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
err:
s->state = SSL_ST_ERR;
return (-1);
}
|
262201276305976211218627982974228415604
|
None
|
CWE-362
|
CVE-2015-1791
|
Race condition in the ssl3_get_new_session_ticket function in ssl/s3_clnt.c in OpenSSL before 0.9.8zg, 1.0.0 before 1.0.0s, 1.0.1 before 1.0.1n, and 1.0.2 before 1.0.2b, when used for a multi-threaded client, allows remote attackers to cause a denial of service (double free and application crash) or possibly have unspecified other impact by providing a NewSessionTicket during an attempt to reuse a ticket that had been obtained earlier.
|
https://nvd.nist.gov/vuln/detail/CVE-2015-1791
|
|
502,735
|
openssl
|
939b4960276b040fc0ed52232238fcc9e2e9ec21
|
https://github.com/openssl/openssl
|
https://github.com/openssl/openssl/commit/939b4960276b040fc0ed52232238fcc9e2e9ec21
|
Fix race condition in NewSessionTicket
If a NewSessionTicket is received by a multi-threaded client when
attempting to reuse a previous ticket, then a race condition can occur,
potentially leading to a double free of the ticket data.
CVE-2015-1791
This also fixes RT#3808 where a session ID is changed for a session already
in the client session cache. Since the session ID is the key to the cache,
this breaks the cache access.
Parts of this patch were inspired by this Akamai change:
https://github.com/akamai/openssl/commit/c0bf69a791239ceec64509f9f19fcafb2461b0d3
Reviewed-by: Rich Salz <[email protected]>
(cherry picked from commit 27c76b9b8010b536687318739c6f631ce4194688)
Conflicts:
ssl/ssl.h
ssl/ssl_err.c
| 0
|
int ssl3_get_new_session_ticket(SSL *s)
{
int ok, al, ret = 0, ticklen;
long n;
const unsigned char *p;
unsigned char *d;
n = s->method->ssl_get_message(s,
SSL3_ST_CR_SESSION_TICKET_A,
SSL3_ST_CR_SESSION_TICKET_B,
SSL3_MT_NEWSESSION_TICKET, 16384, &ok);
if (!ok)
return ((int)n);
if (n < 6) {
/* need at least ticket_lifetime_hint + ticket length */
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, SSL_R_LENGTH_MISMATCH);
goto f_err;
}
p = d = (unsigned char *)s->init_msg;
if (s->session->session_id_length > 0) {
int i = s->session_ctx->session_cache_mode;
SSL_SESSION *new_sess;
/*
* We reused an existing session, so we need to replace it with a new
* one
*/
if (i & SSL_SESS_CACHE_CLIENT) {
/*
* Remove the old session from the cache
*/
if (i & SSL_SESS_CACHE_NO_INTERNAL_STORE) {
if (s->session_ctx->remove_session_cb != NULL)
s->session_ctx->remove_session_cb(s->session_ctx,
s->session);
} else {
/* We carry on if this fails */
SSL_CTX_remove_session(s->session_ctx, s->session);
}
}
if ((new_sess = ssl_session_dup(s->session, 0)) == 0) {
al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, ERR_R_MALLOC_FAILURE);
goto f_err;
}
SSL_SESSION_free(s->session);
s->session = new_sess;
}
n2l(p, s->session->tlsext_tick_lifetime_hint);
n2s(p, ticklen);
/* ticket_lifetime_hint + ticket_length + ticket */
if (ticklen + 6 != n) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, SSL_R_LENGTH_MISMATCH);
goto f_err;
}
if (s->session->tlsext_tick) {
OPENSSL_free(s->session->tlsext_tick);
s->session->tlsext_ticklen = 0;
}
s->session->tlsext_tick = OPENSSL_malloc(ticklen);
if (!s->session->tlsext_tick) {
SSLerr(SSL_F_SSL3_GET_NEW_SESSION_TICKET, ERR_R_MALLOC_FAILURE);
goto err;
}
memcpy(s->session->tlsext_tick, p, ticklen);
s->session->tlsext_ticklen = ticklen;
/*
* There are two ways to detect a resumed ticket session. One is to set
* an appropriate session ID and then the server must return a match in
* ServerHello. This allows the normal client session ID matching to work
* and we know much earlier that the ticket has been accepted. The
* other way is to set zero length session ID when the ticket is
* presented and rely on the handshake to determine session resumption.
* We choose the former approach because this fits in with assumptions
* elsewhere in OpenSSL. The session ID is set to the SHA256 (or SHA1 if
* SHA256 is disabled) hash of the ticket.
*/
EVP_Digest(p, ticklen,
s->session->session_id, &s->session->session_id_length,
# ifndef OPENSSL_NO_SHA256
EVP_sha256(), NULL);
# else
EVP_sha1(), NULL);
# endif
ret = 1;
return (ret);
f_err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
err:
s->state = SSL_ST_ERR;
return (-1);
}
|
24541052959849141799219681889549728285
|
None
|
CWE-362
|
CVE-2015-1791
|
Race condition in the ssl3_get_new_session_ticket function in ssl/s3_clnt.c in OpenSSL before 0.9.8zg, 1.0.0 before 1.0.0s, 1.0.1 before 1.0.1n, and 1.0.2 before 1.0.2b, when used for a multi-threaded client, allows remote attackers to cause a denial of service (double free and application crash) or possibly have unspecified other impact by providing a NewSessionTicket during an attempt to reuse a ticket that had been obtained earlier.
|
https://nvd.nist.gov/vuln/detail/CVE-2015-1791
|
|
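Annotation for the record above: the patched path replaces the shared, reused session with a fresh duplicate before attaching new ticket data, so a NewSessionTicket arriving on one thread never mutates an object another thread still holds. Below is a minimal sketch of that copy-then-swap discipline; the types and function names are hypothetical (not OpenSSL's) and refcount locking is elided.

#include <stdlib.h>
#include <string.h>

struct session {
    unsigned char *tick;   /* ticket bytes, owned by the session */
    size_t ticklen;
    int refs;              /* refcount; locking elided for brevity */
};

/* Racy variant: mutates a session other threads may also be mutating.
 * Two concurrent calls can both free the same old ticket -> double free. */
static int set_ticket_racy(struct session *s, const unsigned char *p, size_t n)
{
    free(s->tick);                 /* another thread may free this too */
    s->tick = malloc(n);
    if (s->tick == NULL)
        return -1;
    memcpy(s->tick, p, n);
    s->ticklen = n;
    return 0;
}

/* Fixed variant, mirroring the commit's approach: build a private duplicate,
 * install the ticket there, then swap it in and drop the old reference. */
static int set_ticket_safe(struct session **sp, const unsigned char *p, size_t n)
{
    struct session *dup = malloc(sizeof(*dup));
    if (dup == NULL)
        return -1;
    dup->tick = malloc(n);
    if (dup->tick == NULL) {
        free(dup);
        return -1;
    }
    memcpy(dup->tick, p, n);
    dup->ticklen = n;
    dup->refs = 1;
    if (--(*sp)->refs == 0) {      /* release our claim on the shared session */
        free((*sp)->tick);
        free(*sp);
    }
    *sp = dup;                     /* publish the private copy */
    return 0;
}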
216,637
|
guile
|
245608911698adb3472803856019bdd5670b6614
|
http://git.savannah.gnu.org/cgit/guile
|
http://git.savannah.gnu.org/cgit/guile.git/commit/?h=stable-2.0&id=245608911698adb3472803856019bdd5670b6614
|
Remove 'umask' calls from 'mkdir'.
Fixes <http://bugs.gnu.org/24659>.
* libguile/filesys.c (SCM_DEFINE): Remove calls to 'umask' when MODE is
unbound; instead, use 0777 as the mode. Update docstring to clarify
this.
* doc/ref/posix.texi (File System): Adjust accordingly.
* NEWS: Mention it.
| 1
|
SCM_DEFINE (scm_mkdir, "mkdir", 1, 1, 0,
(SCM path, SCM mode),
"Create a new directory named by @var{path}. If @var{mode} is omitted\n"
"then the permissions of the directory file are set using the current\n"
"umask. Otherwise they are set to the decimal value specified with\n"
"@var{mode}. The return value is unspecified.")
#define FUNC_NAME s_scm_mkdir
{
int rv;
mode_t mask;
if (SCM_UNBNDP (mode))
{
mask = umask (0);
umask (mask);
STRING_SYSCALL (path, c_path, rv = mkdir (c_path, 0777 ^ mask));
}
else
{
STRING_SYSCALL (path, c_path, rv = mkdir (c_path, scm_to_uint (mode)));
}
if (rv != 0)
SCM_SYSERROR;
return SCM_UNSPECIFIED;
}
|
128331080543585624737822334688372409592
|
None
|
CWE-275
|
CVE-2016-8605
|
The mkdir procedure of GNU Guile temporarily changed the process' umask to zero. During that time window, in a multithreaded application, other threads could end up creating files with insecure permissions. For example, mkdir without the optional mode argument would create directories as 0777. This is fixed in Guile 2.0.13. Prior versions are affected.
|
https://nvd.nist.gov/vuln/detail/CVE-2016-8605
|
|
503,867
|
guile
|
245608911698adb3472803856019bdd5670b6614
|
http://git.savannah.gnu.org/cgit/guile
|
http://git.savannah.gnu.org/cgit/guile.git/commit/?h=stable-2.0&id=245608911698adb3472803856019bdd5670b6614
|
Remove 'umask' calls from 'mkdir'.
Fixes <http://bugs.gnu.org/24659>.
* libguile/filesys.c (SCM_DEFINE): Remove calls to 'umask' when MODE is
unbound; instead, use 0777 as the mode. Update docstring to clarify
this.
* doc/ref/posix.texi (File System): Adjust accordingly.
* NEWS: Mention it.
| 0
|
SCM_DEFINE (scm_mkdir, "mkdir", 1, 1, 0,
(SCM path, SCM mode),
"Create a new directory named by @var{path}. If @var{mode} is omitted\n"
"then the permissions of the directory are set to @code{#o777}\n"
"masked with the current umask (@pxref{Processes, @code{umask}}).\n"
"Otherwise they are set to the value specified with @var{mode}.\n"
"The return value is unspecified.")
#define FUNC_NAME s_scm_mkdir
{
int rv;
mode_t c_mode;
c_mode = SCM_UNBNDP (mode) ? 0777 : scm_to_uint (mode);
STRING_SYSCALL (path, c_path, rv = mkdir (c_path, c_mode));
if (rv != 0)
SCM_SYSERROR;
return SCM_UNSPECIFIED;
}
|
144536922752412825349714870039112918366
|
None
|
CWE-275
|
CVE-2016-8605
|
The mkdir procedure of GNU Guile temporarily changed the process' umask to zero. During that time window, in a multithreaded application, other threads could end up creating files with insecure permissions. For example, mkdir without the optional mode argument would create directories as 0777. This is fixed in Guile 2.0.13. Prior versions are affected.
|
https://nvd.nist.gov/vuln/detail/CVE-2016-8605
|
|
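The Guile pair above shows why the fix removes umask() entirely rather than adjusting it: the umask is process-global state, so a save/restore dance is inherently racy in threaded programs, while mkdir(2) applies the caller's umask to the mode argument atomically on every call. A minimal sketch of both approaches (hypothetical wrapper functions, not Guile source):

#include <sys/stat.h>
#include <sys/types.h>

/* Racy: between the two umask() calls, every other thread in the process
 * briefly runs with umask 0 and can create world-writable files. */
static int mkdir_racy(const char *path)
{
    mode_t mask = umask(0);       /* race window opens */
    umask(mask);                  /* race window closes */
    return mkdir(path, 0777 ^ mask);
}

/* Fixed, as in the commit: the kernel computes mode & ~umask itself,
 * atomically and per call, so no process-global state is touched. */
static int mkdir_safe(const char *path)
{
    return mkdir(path, 0777);
}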
216,654
|
core
|
2c3f37672277b1f73f84722802aaa0ab1ab3e413
|
https://github.com/dovecot/core
|
https://github.com/dovecot/core/commit/2c3f37672277b1f73f84722802aaa0ab1ab3e413
|
auth: Don't crash expanding %variables when username isn't set.
This continues the auth-policy fix in
c3d3faa4f72a676e183f34be960cff13a5a725ae
| 1
|
auth_request_get_var_expand_table_full(const struct auth_request *auth_request,
auth_request_escape_func_t *escape_func,
unsigned int *count)
{
const unsigned int auth_count =
N_ELEMENTS(auth_request_var_expand_static_tab);
struct var_expand_table *tab, *ret_tab;
const char *orig_user, *auth_user;
if (escape_func == NULL)
escape_func = escape_none;
/* keep the extra fields at the beginning. the last static_tab field
contains the ending NULL-fields. */
tab = ret_tab = t_malloc((*count + auth_count) * sizeof(*tab));
memset(tab, 0, *count * sizeof(*tab));
tab += *count;
*count += auth_count;
memcpy(tab, auth_request_var_expand_static_tab,
auth_count * sizeof(*tab));
tab[0].value = escape_func(auth_request->user, auth_request);
tab[1].value = escape_func(t_strcut(auth_request->user, '@'),
auth_request);
tab[2].value = strchr(auth_request->user, '@');
if (tab[2].value != NULL)
tab[2].value = escape_func(tab[2].value+1, auth_request);
tab[3].value = escape_func(auth_request->service, auth_request);
/* tab[4] = we have no home dir */
if (auth_request->local_ip.family != 0)
tab[5].value = net_ip2addr(&auth_request->local_ip);
if (auth_request->remote_ip.family != 0)
tab[6].value = net_ip2addr(&auth_request->remote_ip);
tab[7].value = dec2str(auth_request->client_pid);
if (auth_request->mech_password != NULL) {
tab[8].value = escape_func(auth_request->mech_password,
auth_request);
}
if (auth_request->userdb_lookup) {
tab[9].value = auth_request->userdb == NULL ? "" :
dec2str(auth_request->userdb->userdb->id);
} else {
tab[9].value = auth_request->passdb == NULL ? "" :
dec2str(auth_request->passdb->passdb->id);
}
tab[10].value = auth_request->mech_name == NULL ? "" :
escape_func(auth_request->mech_name, auth_request);
tab[11].value = auth_request->secured ? "secured" : "";
tab[12].value = dec2str(auth_request->local_port);
tab[13].value = dec2str(auth_request->remote_port);
tab[14].value = auth_request->valid_client_cert ? "valid" : "";
if (auth_request->requested_login_user != NULL) {
const char *login_user = auth_request->requested_login_user;
tab[15].value = escape_func(login_user, auth_request);
tab[16].value = escape_func(t_strcut(login_user, '@'),
auth_request);
tab[17].value = strchr(login_user, '@');
if (tab[17].value != NULL) {
tab[17].value = escape_func(tab[17].value+1,
auth_request);
}
}
tab[18].value = auth_request->session_id == NULL ? NULL :
escape_func(auth_request->session_id, auth_request);
if (auth_request->real_local_ip.family != 0)
tab[19].value = net_ip2addr(&auth_request->real_local_ip);
if (auth_request->real_remote_ip.family != 0)
tab[20].value = net_ip2addr(&auth_request->real_remote_ip);
tab[21].value = dec2str(auth_request->real_local_port);
tab[22].value = dec2str(auth_request->real_remote_port);
tab[23].value = strchr(auth_request->user, '@');
if (tab[23].value != NULL) {
tab[23].value = escape_func(t_strcut(tab[23].value+1, '@'),
auth_request);
}
tab[24].value = strrchr(auth_request->user, '@');
if (tab[24].value != NULL)
tab[24].value = escape_func(tab[24].value+1, auth_request);
tab[25].value = auth_request->master_user == NULL ? NULL :
escape_func(auth_request->master_user, auth_request);
tab[26].value = auth_request->session_pid == (pid_t)-1 ? NULL :
dec2str(auth_request->session_pid);
orig_user = auth_request->original_username != NULL ?
auth_request->original_username : auth_request->user;
tab[27].value = escape_func(orig_user, auth_request);
tab[28].value = escape_func(t_strcut(orig_user, '@'), auth_request);
tab[29].value = strchr(orig_user, '@');
if (tab[29].value != NULL)
tab[29].value = escape_func(tab[29].value+1, auth_request);
if (auth_request->master_user != NULL)
auth_user = auth_request->master_user;
else
auth_user = orig_user;
tab[30].value = escape_func(auth_user, auth_request);
tab[31].value = escape_func(t_strcut(auth_user, '@'), auth_request);
tab[32].value = strchr(auth_user, '@');
if (tab[32].value != NULL)
tab[32].value = escape_func(tab[32].value+1, auth_request);
if (auth_request->local_name != NULL)
tab[33].value = escape_func(auth_request->local_name, auth_request);
else
tab[33].value = "";
return ret_tab;
}
|
262295488878616392807601246569405637828
|
None
|
CWE-20
|
CVE-2016-8652
|
The auth component in Dovecot before 2.2.27, when auth-policy is configured, allows remote attackers to cause a denial of service (crash) by aborting authentication without setting a username.
|
https://nvd.nist.gov/vuln/detail/CVE-2016-8652
|
|
503,985
|
core
|
2c3f37672277b1f73f84722802aaa0ab1ab3e413
|
https://github.com/dovecot/core
|
https://github.com/dovecot/core/commit/2c3f37672277b1f73f84722802aaa0ab1ab3e413
|
auth: Don't crash expanding %variables when username isn't set.
This continues the auth-policy fix in
c3d3faa4f72a676e183f34be960cff13a5a725ae
| 0
|
auth_request_get_var_expand_table_full(const struct auth_request *auth_request,
auth_request_escape_func_t *escape_func,
unsigned int *count)
{
const unsigned int auth_count =
N_ELEMENTS(auth_request_var_expand_static_tab);
struct var_expand_table *tab, *ret_tab;
const char *orig_user, *auth_user, *username;
if (escape_func == NULL)
escape_func = escape_none;
/* keep the extra fields at the beginning. the last static_tab field
contains the ending NULL-fields. */
tab = ret_tab = t_malloc((*count + auth_count) * sizeof(*tab));
memset(tab, 0, *count * sizeof(*tab));
tab += *count;
*count += auth_count;
memcpy(tab, auth_request_var_expand_static_tab,
auth_count * sizeof(*tab));
username = auth_request->user != NULL ? auth_request->user : "";
tab[0].value = escape_func(username, auth_request);
tab[1].value = escape_func(t_strcut(username, '@'),
auth_request);
tab[2].value = strchr(username, '@');
if (tab[2].value != NULL)
tab[2].value = escape_func(tab[2].value+1, auth_request);
tab[3].value = escape_func(auth_request->service, auth_request);
/* tab[4] = we have no home dir */
if (auth_request->local_ip.family != 0)
tab[5].value = net_ip2addr(&auth_request->local_ip);
if (auth_request->remote_ip.family != 0)
tab[6].value = net_ip2addr(&auth_request->remote_ip);
tab[7].value = dec2str(auth_request->client_pid);
if (auth_request->mech_password != NULL) {
tab[8].value = escape_func(auth_request->mech_password,
auth_request);
}
if (auth_request->userdb_lookup) {
tab[9].value = auth_request->userdb == NULL ? "" :
dec2str(auth_request->userdb->userdb->id);
} else {
tab[9].value = auth_request->passdb == NULL ? "" :
dec2str(auth_request->passdb->passdb->id);
}
tab[10].value = auth_request->mech_name == NULL ? "" :
escape_func(auth_request->mech_name, auth_request);
tab[11].value = auth_request->secured ? "secured" : "";
tab[12].value = dec2str(auth_request->local_port);
tab[13].value = dec2str(auth_request->remote_port);
tab[14].value = auth_request->valid_client_cert ? "valid" : "";
if (auth_request->requested_login_user != NULL) {
const char *login_user = auth_request->requested_login_user;
tab[15].value = escape_func(login_user, auth_request);
tab[16].value = escape_func(t_strcut(login_user, '@'),
auth_request);
tab[17].value = strchr(login_user, '@');
if (tab[17].value != NULL) {
tab[17].value = escape_func(tab[17].value+1,
auth_request);
}
}
tab[18].value = auth_request->session_id == NULL ? NULL :
escape_func(auth_request->session_id, auth_request);
if (auth_request->real_local_ip.family != 0)
tab[19].value = net_ip2addr(&auth_request->real_local_ip);
if (auth_request->real_remote_ip.family != 0)
tab[20].value = net_ip2addr(&auth_request->real_remote_ip);
tab[21].value = dec2str(auth_request->real_local_port);
tab[22].value = dec2str(auth_request->real_remote_port);
tab[23].value = strchr(username, '@');
if (tab[23].value != NULL) {
tab[23].value = escape_func(t_strcut(tab[23].value+1, '@'),
auth_request);
}
tab[24].value = strrchr(username, '@');
if (tab[24].value != NULL)
tab[24].value = escape_func(tab[24].value+1, auth_request);
tab[25].value = auth_request->master_user == NULL ? NULL :
escape_func(auth_request->master_user, auth_request);
tab[26].value = auth_request->session_pid == (pid_t)-1 ? NULL :
dec2str(auth_request->session_pid);
orig_user = auth_request->original_username != NULL ?
auth_request->original_username : username;
tab[27].value = escape_func(orig_user, auth_request);
tab[28].value = escape_func(t_strcut(orig_user, '@'), auth_request);
tab[29].value = strchr(orig_user, '@');
if (tab[29].value != NULL)
tab[29].value = escape_func(tab[29].value+1, auth_request);
if (auth_request->master_user != NULL)
auth_user = auth_request->master_user;
else
auth_user = orig_user;
tab[30].value = escape_func(auth_user, auth_request);
tab[31].value = escape_func(t_strcut(auth_user, '@'), auth_request);
tab[32].value = strchr(auth_user, '@');
if (tab[32].value != NULL)
tab[32].value = escape_func(tab[32].value+1, auth_request);
if (auth_request->local_name != NULL)
tab[33].value = escape_func(auth_request->local_name, auth_request);
else
tab[33].value = "";
return ret_tab;
}
|
300800458657154811271321575901169213921
|
None
|
CWE-20
|
CVE-2016-8652
|
The auth component in Dovecot before 2.2.27, when auth-policy is configured, allows remote attackers to cause a denial of service (crash) by aborting authentication without setting a username.
|
https://nvd.nist.gov/vuln/detail/CVE-2016-8652
|
|
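The Dovecot pair above reduces to one pattern: normalize the possibly-unset username to "" once, then use that local value in every strchr()/t_strcut() call instead of handing a NULL pointer to string functions. A minimal sketch (hypothetical helper, not Dovecot's API):

#include <string.h>

/* Returns the domain part of user, or "" if there is none.
 * 'user' may be NULL, e.g. when authentication was aborted early. */
static const char *user_domain(const char *user)
{
    const char *username = user != NULL ? user : "";  /* normalize once */
    const char *at = strchr(username, '@');           /* never sees NULL */
    return at != NULL ? at + 1 : "";
}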
216,701
|
libtiff
|
dc02f9050311a90b3c0655147cee09bfa7081cfc
|
https://github.com/vadz/libtiff
|
https://github.com/vadz/libtiff/commit/dc02f9050311a90b3c0655147cee09bfa7081cfc
|
* libtiff/tif_read.c: add protection against excessive memory
allocation attempts in TIFFReadDirEntryArray() on short files.
Effective for mmap'ed case. And non-mmap'ed case, but restricted
to 64bit builds.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2675
| 1
|
static enum TIFFReadDirEntryErr TIFFReadDirEntryArrayWithLimit(
TIFF* tif, TIFFDirEntry* direntry, uint32* count, uint32 desttypesize,
void** value, uint64 maxcount)
{
int typesize;
uint32 datasize;
void* data;
uint64 target_count64;
typesize=TIFFDataWidth(direntry->tdir_type);
target_count64 = (direntry->tdir_count > maxcount) ?
maxcount : direntry->tdir_count;
if ((target_count64==0)||(typesize==0))
{
*value=0;
return(TIFFReadDirEntryErrOk);
}
(void) desttypesize;
/*
* As a sanity check, make sure we have no more than a 2GB tag array
* in either the current data type or the dest data type. This also
* avoids problems with overflow of tmsize_t on 32bit systems.
*/
if ((uint64)(2147483647/typesize)<target_count64)
return(TIFFReadDirEntryErrSizesan);
if ((uint64)(2147483647/desttypesize)<target_count64)
return(TIFFReadDirEntryErrSizesan);
*count=(uint32)target_count64;
datasize=(*count)*typesize;
assert((tmsize_t)datasize>0);
data=_TIFFCheckMalloc(tif, *count, typesize, "ReadDirEntryArray");
if (data==0)
return(TIFFReadDirEntryErrAlloc);
if (!(tif->tif_flags&TIFF_BIGTIFF))
{
if (datasize<=4)
_TIFFmemcpy(data,&direntry->tdir_offset,datasize);
else
{
enum TIFFReadDirEntryErr err;
uint32 offset = direntry->tdir_offset.toff_long;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong(&offset);
err=TIFFReadDirEntryData(tif,(uint64)offset,(tmsize_t)datasize,data);
if (err!=TIFFReadDirEntryErrOk)
{
_TIFFfree(data);
return(err);
}
}
}
else
{
if (datasize<=8)
_TIFFmemcpy(data,&direntry->tdir_offset,datasize);
else
{
enum TIFFReadDirEntryErr err;
uint64 offset = direntry->tdir_offset.toff_long8;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong8(&offset);
err=TIFFReadDirEntryData(tif,offset,(tmsize_t)datasize,data);
if (err!=TIFFReadDirEntryErrOk)
{
_TIFFfree(data);
return(err);
}
}
}
*value=data;
return(TIFFReadDirEntryErrOk);
}
|
251386727160240066128978497337284996796
|
None
|
CWE-770
|
CVE-2017-12944
|
The TIFFReadDirEntryArray function in tif_read.c in LibTIFF 4.0.8 mishandles memory allocation for short files, which allows remote attackers to cause a denial of service (allocation failure and application crash) in the TIFFFetchStripThing function in tif_dirread.c during a tiff2pdf invocation.
|
https://nvd.nist.gov/vuln/detail/CVE-2017-12944
|
|
504,608
|
libtiff
|
dc02f9050311a90b3c0655147cee09bfa7081cfc
|
https://github.com/vadz/libtiff
|
https://github.com/vadz/libtiff/commit/dc02f9050311a90b3c0655147cee09bfa7081cfc
|
* libtiff/tif_read.c: add protection against excessive memory
allocation attempts in TIFFReadDirEntryArray() on short files.
Effective for mmap'ed case. And non-mmap'ed case, but restricted
to 64bit builds.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2675
| 0
|
static enum TIFFReadDirEntryErr TIFFReadDirEntryArrayWithLimit(
TIFF* tif, TIFFDirEntry* direntry, uint32* count, uint32 desttypesize,
void** value, uint64 maxcount)
{
int typesize;
uint32 datasize;
void* data;
uint64 target_count64;
typesize=TIFFDataWidth(direntry->tdir_type);
target_count64 = (direntry->tdir_count > maxcount) ?
maxcount : direntry->tdir_count;
if ((target_count64==0)||(typesize==0))
{
*value=0;
return(TIFFReadDirEntryErrOk);
}
(void) desttypesize;
/*
* As a sanity check, make sure we have no more than a 2GB tag array
* in either the current data type or the dest data type. This also
* avoids problems with overflow of tmsize_t on 32bit systems.
*/
if ((uint64)(2147483647/typesize)<target_count64)
return(TIFFReadDirEntryErrSizesan);
if ((uint64)(2147483647/desttypesize)<target_count64)
return(TIFFReadDirEntryErrSizesan);
*count=(uint32)target_count64;
datasize=(*count)*typesize;
assert((tmsize_t)datasize>0);
if( isMapped(tif) && datasize > tif->tif_size )
return TIFFReadDirEntryErrIo;
if( !isMapped(tif) &&
(((tif->tif_flags&TIFF_BIGTIFF) && datasize > 8) ||
(!(tif->tif_flags&TIFF_BIGTIFF) && datasize > 4)) )
{
data = NULL;
}
else
{
data=_TIFFCheckMalloc(tif, *count, typesize, "ReadDirEntryArray");
if (data==0)
return(TIFFReadDirEntryErrAlloc);
}
if (!(tif->tif_flags&TIFF_BIGTIFF))
{
if (datasize<=4)
_TIFFmemcpy(data,&direntry->tdir_offset,datasize);
else
{
enum TIFFReadDirEntryErr err;
uint32 offset = direntry->tdir_offset.toff_long;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong(&offset);
if( isMapped(tif) )
err=TIFFReadDirEntryData(tif,(uint64)offset,(tmsize_t)datasize,data);
else
err=TIFFReadDirEntryDataAndRealloc(tif,(uint64)offset,(tmsize_t)datasize,&data);
if (err!=TIFFReadDirEntryErrOk)
{
_TIFFfree(data);
return(err);
}
}
}
else
{
if (datasize<=8)
_TIFFmemcpy(data,&direntry->tdir_offset,datasize);
else
{
enum TIFFReadDirEntryErr err;
uint64 offset = direntry->tdir_offset.toff_long8;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong8(&offset);
if( isMapped(tif) )
err=TIFFReadDirEntryData(tif,(uint64)offset,(tmsize_t)datasize,data);
else
err=TIFFReadDirEntryDataAndRealloc(tif,(uint64)offset,(tmsize_t)datasize,&data);
if (err!=TIFFReadDirEntryErrOk)
{
_TIFFfree(data);
return(err);
}
}
}
*value=data;
return(TIFFReadDirEntryErrOk);
}
|
283163990857495325814682097643200308064
|
None
|
CWE-770
|
CVE-2017-12944
|
The TIFFReadDirEntryArray function in tif_read.c in LibTIFF 4.0.8 mishandles memory allocation for short files, which allows remote attackers to cause a denial of service (allocation failure and application crash) in the TIFFFetchStripThing function in tif_dirread.c during a tiff2pdf invocation.
|
https://nvd.nist.gov/vuln/detail/CVE-2017-12944
|
|
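The LibTIFF pair above adds one idea: an allocation driven by an attacker-controlled count must be bounded by what the file can actually supply before malloc is called, not merely by the 2GB sanity limit. A minimal sketch of that check for the mapped case, with hypothetical names; the caller is assumed to have already ruled out count*size overflow:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *read_array(const uint8_t *file, size_t file_size,
                        uint64_t count, size_t elem_size, size_t offset)
{
    uint64_t datasize = count * elem_size;   /* overflow excluded by caller */
    /* A short file cannot contain more bytes than its own size, so an
     * oversized request is an I/O error, never an allocation attempt. */
    if (datasize > file_size || offset > file_size - datasize)
        return NULL;
    void *data = malloc((size_t)datasize);
    if (data == NULL)
        return NULL;
    memcpy(data, file + offset, (size_t)datasize);  /* bounds checked above */
    return data;
}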
216,726
|
openssl
|
f426625b6ae9a7831010750490a5f0ad689c5ba3
|
https://github.com/openssl/openssl
|
https://git.openssl.org/gitweb/?p=openssl.git;a=commitdiff;h=f426625b6ae9a7831010750490a5f0ad689c5ba3
|
Prevent over long nonces in ChaCha20-Poly1305
ChaCha20-Poly1305 is an AEAD cipher, and requires a unique nonce input for
every encryption operation. RFC 7539 specifies that the nonce value (IV)
should be 96 bits (12 bytes). OpenSSL allows a variable nonce length and
front pads the nonce with 0 bytes if it is less than 12 bytes. However it
also incorrectly allows a nonce to be set of up to 16 bytes. In this case
only the last 12 bytes are significant and any additional leading bytes are
ignored.
It is a requirement of using this cipher that nonce values are unique.
Messages encrypted using a reused nonce value are susceptible to serious
confidentiality and integrity attacks. If an application changes the
default nonce length to be longer than 12 bytes and then makes a change to
the leading bytes of the nonce expecting the new value to be a new unique
nonce then such an application could inadvertently encrypt messages with a
reused nonce.
Additionally the ignored bytes in a long nonce are not covered by the
integrity guarantee of this cipher. Any application that relies on the
integrity of these ignored leading bytes of a long nonce may be further
affected.
Any OpenSSL internal use of this cipher, including in SSL/TLS, is safe
because no such use sets such a long nonce value. However user
applications that use this cipher directly and set a non-default nonce
length to be longer than 12 bytes may be vulnerable.
CVE-2019-1543
Fixes #8345
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Richard Levitte <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/8406)
(cherry picked from commit 2a3d0ee9d59156c48973592331404471aca886d6)
| 1
|
static int chacha20_poly1305_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
void *ptr)
{
EVP_CHACHA_AEAD_CTX *actx = aead_data(ctx);
switch(type) {
case EVP_CTRL_INIT:
if (actx == NULL)
actx = ctx->cipher_data
= OPENSSL_zalloc(sizeof(*actx) + Poly1305_ctx_size());
if (actx == NULL) {
EVPerr(EVP_F_CHACHA20_POLY1305_CTRL, EVP_R_INITIALIZATION_ERROR);
return 0;
}
actx->len.aad = 0;
actx->len.text = 0;
actx->aad = 0;
actx->mac_inited = 0;
actx->tag_len = 0;
actx->nonce_len = 12;
actx->tls_payload_length = NO_TLS_PAYLOAD_LENGTH;
memset(actx->tls_aad, 0, POLY1305_BLOCK_SIZE);
return 1;
case EVP_CTRL_COPY:
if (actx) {
EVP_CIPHER_CTX *dst = (EVP_CIPHER_CTX *)ptr;
dst->cipher_data =
OPENSSL_memdup(actx, sizeof(*actx) + Poly1305_ctx_size());
if (dst->cipher_data == NULL) {
EVPerr(EVP_F_CHACHA20_POLY1305_CTRL, EVP_R_COPY_ERROR);
return 0;
}
}
return 1;
case EVP_CTRL_AEAD_SET_IVLEN:
if (arg <= 0 || arg > CHACHA_CTR_SIZE)
return 0;
actx->nonce_len = arg;
return 1;
case EVP_CTRL_AEAD_SET_IV_FIXED:
if (arg != 12)
return 0;
actx->nonce[0] = actx->key.counter[1]
= CHACHA_U8TOU32((unsigned char *)ptr);
actx->nonce[1] = actx->key.counter[2]
= CHACHA_U8TOU32((unsigned char *)ptr+4);
actx->nonce[2] = actx->key.counter[3]
= CHACHA_U8TOU32((unsigned char *)ptr+8);
return 1;
case EVP_CTRL_AEAD_SET_TAG:
if (arg <= 0 || arg > POLY1305_BLOCK_SIZE)
return 0;
if (ptr != NULL) {
memcpy(actx->tag, ptr, arg);
actx->tag_len = arg;
}
return 1;
case EVP_CTRL_AEAD_GET_TAG:
if (arg <= 0 || arg > POLY1305_BLOCK_SIZE || !ctx->encrypt)
return 0;
memcpy(ptr, actx->tag, arg);
return 1;
case EVP_CTRL_AEAD_TLS1_AAD:
if (arg != EVP_AEAD_TLS1_AAD_LEN)
return 0;
{
unsigned int len;
unsigned char *aad = ptr;
memcpy(actx->tls_aad, ptr, EVP_AEAD_TLS1_AAD_LEN);
len = aad[EVP_AEAD_TLS1_AAD_LEN - 2] << 8 |
aad[EVP_AEAD_TLS1_AAD_LEN - 1];
aad = actx->tls_aad;
if (!ctx->encrypt) {
if (len < POLY1305_BLOCK_SIZE)
return 0;
len -= POLY1305_BLOCK_SIZE; /* discount attached tag */
aad[EVP_AEAD_TLS1_AAD_LEN - 2] = (unsigned char)(len >> 8);
aad[EVP_AEAD_TLS1_AAD_LEN - 1] = (unsigned char)len;
}
actx->tls_payload_length = len;
/*
* merge record sequence number as per RFC7905
*/
actx->key.counter[1] = actx->nonce[0];
actx->key.counter[2] = actx->nonce[1] ^ CHACHA_U8TOU32(aad);
actx->key.counter[3] = actx->nonce[2] ^ CHACHA_U8TOU32(aad+4);
actx->mac_inited = 0;
return POLY1305_BLOCK_SIZE; /* tag length */
}
case EVP_CTRL_AEAD_SET_MAC_KEY:
/* no-op */
return 1;
default:
return -1;
}
}
|
82290393614199201513765816848927157706
|
None
|
CWE-327
|
CVE-2019-1543
|
ChaCha20-Poly1305 is an AEAD cipher, and requires a unique nonce input for every encryption operation. RFC 7539 specifies that the nonce value (IV) should be 96 bits (12 bytes). OpenSSL allows a variable nonce length and front pads the nonce with 0 bytes if it is less than 12 bytes. However it also incorrectly allows a nonce to be set of up to 16 bytes. In this case only the last 12 bytes are significant and any additional leading bytes are ignored. It is a requirement of using this cipher that nonce values are unique. Messages encrypted using a reused nonce value are susceptible to serious confidentiality and integrity attacks. If an application changes the default nonce length to be longer than 12 bytes and then makes a change to the leading bytes of the nonce expecting the new value to be a new unique nonce then such an application could inadvertently encrypt messages with a reused nonce. Additionally the ignored bytes in a long nonce are not covered by the integrity guarantee of this cipher. Any application that relies on the integrity of these ignored leading bytes of a long nonce may be further affected. Any OpenSSL internal use of this cipher, including in SSL/TLS, is safe because no such use sets such a long nonce value. However user applications that use this cipher directly and set a non-default nonce length to be longer than 12 bytes may be vulnerable. OpenSSL versions 1.1.1 and 1.1.0 are affected by this issue. Due to the limited scope of affected deployments this has been assessed as low severity and therefore we are not creating new releases at this time. Fixed in OpenSSL 1.1.1c (Affected 1.1.1-1.1.1b). Fixed in OpenSSL 1.1.0k (Affected 1.1.0-1.1.0j).
|
https://nvd.nist.gov/vuln/detail/CVE-2019-1543
|
|
505,463
|
openssl
|
f426625b6ae9a7831010750490a5f0ad689c5ba3
|
https://github.com/openssl/openssl
|
https://git.openssl.org/gitweb/?p=openssl.git;a=commitdiff;h=f426625b6ae9a7831010750490a5f0ad689c5ba3
|
Prevent over long nonces in ChaCha20-Poly1305
ChaCha20-Poly1305 is an AEAD cipher, and requires a unique nonce input for
every encryption operation. RFC 7539 specifies that the nonce value (IV)
should be 96 bits (12 bytes). OpenSSL allows a variable nonce length and
front pads the nonce with 0 bytes if it is less than 12 bytes. However it
also incorrectly allows a nonce to be set of up to 16 bytes. In this case
only the last 12 bytes are significant and any additional leading bytes are
ignored.
It is a requirement of using this cipher that nonce values are unique.
Messages encrypted using a reused nonce value are susceptible to serious
confidentiality and integrity attacks. If an application changes the
default nonce length to be longer than 12 bytes and then makes a change to
the leading bytes of the nonce expecting the new value to be a new unique
nonce then such an application could inadvertently encrypt messages with a
reused nonce.
Additionally the ignored bytes in a long nonce are not covered by the
integrity guarantee of this cipher. Any application that relies on the
integrity of these ignored leading bytes of a long nonce may be further
affected.
Any OpenSSL internal use of this cipher, including in SSL/TLS, is safe
because no such use sets such a long nonce value. However user
applications that use this cipher directly and set a non-default nonce
length to be longer than 12 bytes may be vulnerable.
CVE-2019-1543
Fixes #8345
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Richard Levitte <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/8406)
(cherry picked from commit 2a3d0ee9d59156c48973592331404471aca886d6)
| 0
|
static int chacha20_poly1305_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
void *ptr)
{
EVP_CHACHA_AEAD_CTX *actx = aead_data(ctx);
switch(type) {
case EVP_CTRL_INIT:
if (actx == NULL)
actx = ctx->cipher_data
= OPENSSL_zalloc(sizeof(*actx) + Poly1305_ctx_size());
if (actx == NULL) {
EVPerr(EVP_F_CHACHA20_POLY1305_CTRL, EVP_R_INITIALIZATION_ERROR);
return 0;
}
actx->len.aad = 0;
actx->len.text = 0;
actx->aad = 0;
actx->mac_inited = 0;
actx->tag_len = 0;
actx->nonce_len = 12;
actx->tls_payload_length = NO_TLS_PAYLOAD_LENGTH;
memset(actx->tls_aad, 0, POLY1305_BLOCK_SIZE);
return 1;
case EVP_CTRL_COPY:
if (actx) {
EVP_CIPHER_CTX *dst = (EVP_CIPHER_CTX *)ptr;
dst->cipher_data =
OPENSSL_memdup(actx, sizeof(*actx) + Poly1305_ctx_size());
if (dst->cipher_data == NULL) {
EVPerr(EVP_F_CHACHA20_POLY1305_CTRL, EVP_R_COPY_ERROR);
return 0;
}
}
return 1;
case EVP_CTRL_AEAD_SET_IVLEN:
if (arg <= 0 || arg > CHACHA20_POLY1305_MAX_IVLEN)
return 0;
actx->nonce_len = arg;
return 1;
case EVP_CTRL_AEAD_SET_IV_FIXED:
if (arg != 12)
return 0;
actx->nonce[0] = actx->key.counter[1]
= CHACHA_U8TOU32((unsigned char *)ptr);
actx->nonce[1] = actx->key.counter[2]
= CHACHA_U8TOU32((unsigned char *)ptr+4);
actx->nonce[2] = actx->key.counter[3]
= CHACHA_U8TOU32((unsigned char *)ptr+8);
return 1;
case EVP_CTRL_AEAD_SET_TAG:
if (arg <= 0 || arg > POLY1305_BLOCK_SIZE)
return 0;
if (ptr != NULL) {
memcpy(actx->tag, ptr, arg);
actx->tag_len = arg;
}
return 1;
case EVP_CTRL_AEAD_GET_TAG:
if (arg <= 0 || arg > POLY1305_BLOCK_SIZE || !ctx->encrypt)
return 0;
memcpy(ptr, actx->tag, arg);
return 1;
case EVP_CTRL_AEAD_TLS1_AAD:
if (arg != EVP_AEAD_TLS1_AAD_LEN)
return 0;
{
unsigned int len;
unsigned char *aad = ptr;
memcpy(actx->tls_aad, ptr, EVP_AEAD_TLS1_AAD_LEN);
len = aad[EVP_AEAD_TLS1_AAD_LEN - 2] << 8 |
aad[EVP_AEAD_TLS1_AAD_LEN - 1];
aad = actx->tls_aad;
if (!ctx->encrypt) {
if (len < POLY1305_BLOCK_SIZE)
return 0;
len -= POLY1305_BLOCK_SIZE; /* discount attached tag */
aad[EVP_AEAD_TLS1_AAD_LEN - 2] = (unsigned char)(len >> 8);
aad[EVP_AEAD_TLS1_AAD_LEN - 1] = (unsigned char)len;
}
actx->tls_payload_length = len;
/*
* merge record sequence number as per RFC7905
*/
actx->key.counter[1] = actx->nonce[0];
actx->key.counter[2] = actx->nonce[1] ^ CHACHA_U8TOU32(aad);
actx->key.counter[3] = actx->nonce[2] ^ CHACHA_U8TOU32(aad+4);
actx->mac_inited = 0;
return POLY1305_BLOCK_SIZE; /* tag length */
}
case EVP_CTRL_AEAD_SET_MAC_KEY:
/* no-op */
return 1;
default:
return -1;
}
}
|
319328437340198657902119470811531700376
|
None
|
CWE-327
|
CVE-2019-1543
|
ChaCha20-Poly1305 is an AEAD cipher, and requires a unique nonce input for every encryption operation. RFC 7539 specifies that the nonce value (IV) should be 96 bits (12 bytes). OpenSSL allows a variable nonce length and front pads the nonce with 0 bytes if it is less than 12 bytes. However it also incorrectly allows a nonce to be set of up to 16 bytes. In this case only the last 12 bytes are significant and any additional leading bytes are ignored. It is a requirement of using this cipher that nonce values are unique. Messages encrypted using a reused nonce value are susceptible to serious confidentiality and integrity attacks. If an application changes the default nonce length to be longer than 12 bytes and then makes a change to the leading bytes of the nonce expecting the new value to be a new unique nonce then such an application could inadvertently encrypt messages with a reused nonce. Additionally the ignored bytes in a long nonce are not covered by the integrity guarantee of this cipher. Any application that relies on the integrity of these ignored leading bytes of a long nonce may be further affected. Any OpenSSL internal use of this cipher, including in SSL/TLS, is safe because no such use sets such a long nonce value. However user applications that use this cipher directly and set a non-default nonce length to be longer than 12 bytes may be vulnerable. OpenSSL versions 1.1.1 and 1.1.0 are affected by this issue. Due to the limited scope of affected deployments this has been assessed as low severity and therefore we are not creating new releases at this time. Fixed in OpenSSL 1.1.1c (Affected 1.1.1-1.1.1b). Fixed in OpenSSL 1.1.0k (Affected 1.1.0-1.1.0j).
|
https://nvd.nist.gov/vuln/detail/CVE-2019-1543
|
|
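A usage sketch for the OpenSSL pair above, written against the public EVP interface (these calls exist in OpenSSL 1.1.x; error reporting trimmed): after the fix, EVP_CTRL_AEAD_SET_IVLEN caps the ChaCha20-Poly1305 nonce at the 12 bytes RFC 7539 specifies, where it previously accepted up to the 16-byte counter size.

#include <openssl/evp.h>

/* Returns 1 if the cipher accepts a nonce of 'ivlen' bytes, else 0. */
static int chacha_accepts_ivlen(int ivlen)
{
    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    int ok = 0;
    if (ctx != NULL
        && EVP_EncryptInit_ex(ctx, EVP_chacha20_poly1305(), NULL, NULL, NULL)
        && EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, ivlen, NULL))
        ok = 1;
    EVP_CIPHER_CTX_free(ctx);
    return ok;   /* ivlen=12 -> 1 always; ivlen=16 -> 1 only before the fix */
}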
216,767
|
core
|
ed4b7d5d1b30964216d61d3090a7b47a957f5b26
|
https://github.com/dovecot/core
|
https://github.com/dovecot/core/commit/ed4b7d5d1b30964216d61d3090a7b47a957f5b26
|
lib-smtp: smtp-command-parser - Fix infinite loop occurring when command stream ends in UTF-8 character.
Broken by 8f08f1944be438a2422b604c08e5060b5c7bd72f.
| 1
|
static int smtp_command_parse_parameters(struct smtp_command_parser *parser)
{
const unsigned char *p, *mp;
uoff_t max_size = (parser->auth_response ?
parser->limits.max_auth_size :
parser->limits.max_parameters_size);
/* We assume parameters to match textstr (HT, SP, Printable US-ASCII).
For command parameters, we also accept valid UTF-8 characters.
*/
p = parser->cur + parser->state.poff;
while (p < parser->end) {
unichar_t ch;
int nch = 1;
if (parser->auth_response)
ch = *p;
else {
nch = uni_utf8_get_char_n(p, (size_t)(p - parser->end),
&ch);
}
if (nch < 0) {
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_BAD_COMMAND,
"Invalid UTF-8 character in command parameters");
return -1;
}
if ((parser->auth_response || (ch & 0x80) == 0x00) &&
!smtp_char_is_textstr((unsigned char)ch))
break;
p += nch;
}
if (max_size > 0 && (uoff_t)(p - parser->cur) > max_size) {
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_LINE_TOO_LONG,
"%s line is too long",
(parser->auth_response ?
"AUTH response" : "Command"));
return -1;
}
parser->state.poff = p - parser->cur;
if (p == parser->end)
return 0;
/* In the interest of improved interoperability, SMTP receivers SHOULD
tolerate trailing white space before the terminating <CRLF>.
WSP = SP / HTAB ; white space
--> Trim the end of the buffer
*/
mp = p;
if (mp > parser->cur) {
while (mp > parser->cur && (*(mp-1) == ' ' || *(mp-1) == '\t'))
mp--;
}
if (!parser->auth_response && mp > parser->cur && *parser->cur == ' ') {
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_BAD_COMMAND,
"Duplicate space after command name");
return -1;
}
parser->state.cmd_params = i_strdup_until(parser->cur, mp);
parser->cur = p;
parser->state.poff = 0;
return 1;
}
|
181341317141954506069810684888107522957
|
None
|
CWE-835
|
CVE-2020-7046
|
lib-smtp in submission-login and lmtp in Dovecot 2.3.9 before 2.3.9.3 mishandles truncated UTF-8 data in command parameters, as demonstrated by the unauthenticated triggering of a submission-login infinite loop.
|
https://nvd.nist.gov/vuln/detail/CVE-2020-7046
|
|
505,654
|
core
|
ed4b7d5d1b30964216d61d3090a7b47a957f5b26
|
https://github.com/dovecot/core
|
https://github.com/dovecot/core/commit/ed4b7d5d1b30964216d61d3090a7b47a957f5b26
|
lib-smtp: smtp-command-parser - Fix infinite loop occurring when command stream ends in UTF-8 character.
Broken by 8f08f1944be438a2422b604c08e5060b5c7bd72f.
| 0
|
static int smtp_command_parse_parameters(struct smtp_command_parser *parser)
{
const unsigned char *p, *mp;
uoff_t max_size = (parser->auth_response ?
parser->limits.max_auth_size :
parser->limits.max_parameters_size);
int nch = 1;
/* We assume parameters to match textstr (HT, SP, Printable US-ASCII).
For command parameters, we also accept valid UTF-8 characters.
*/
p = parser->cur + parser->state.poff;
while (p < parser->end) {
unichar_t ch;
if (parser->auth_response)
ch = *p;
else {
nch = uni_utf8_get_char_n(p, (size_t)(p - parser->end),
&ch);
}
if (nch == 0)
break;
if (nch < 0) {
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_BAD_COMMAND,
"Invalid UTF-8 character in command parameters");
return -1;
}
if ((parser->auth_response || (ch & 0x80) == 0x00) &&
!smtp_char_is_textstr((unsigned char)ch))
break;
p += nch;
}
if (max_size > 0 && (uoff_t)(p - parser->cur) > max_size) {
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_LINE_TOO_LONG,
"%s line is too long",
(parser->auth_response ?
"AUTH response" : "Command"));
return -1;
}
parser->state.poff = p - parser->cur;
if (p == parser->end || nch == 0)
return 0;
/* In the interest of improved interoperability, SMTP receivers SHOULD
tolerate trailing white space before the terminating <CRLF>.
WSP = SP / HTAB ; white space
--> Trim the end of the buffer
*/
mp = p;
if (mp > parser->cur) {
while (mp > parser->cur && (*(mp-1) == ' ' || *(mp-1) == '\t'))
mp--;
}
if (!parser->auth_response && mp > parser->cur && *parser->cur == ' ') {
smtp_command_parser_error(parser,
SMTP_COMMAND_PARSE_ERROR_BAD_COMMAND,
"Duplicate space after command name");
return -1;
}
parser->state.cmd_params = i_strdup_until(parser->cur, mp);
parser->cur = p;
parser->state.poff = 0;
return 1;
}
|
290259003832263006605358025642064625220
|
None
|
CWE-835
|
CVE-2020-7046
|
lib-smtp in submission-login and lmtp in Dovecot 2.3.9 before 2.3.9.3 mishandles truncated UTF-8 data in command parameters, as demonstrated by the unauthenticated triggering of a submission-login infinite loop.
|
https://nvd.nist.gov/vuln/detail/CVE-2020-7046
|
|
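The lib-smtp pair above turns on one distinction: a UTF-8 decoder can report an invalid byte (nch < 0) or a sequence truncated at the end of the buffer (nch == 0), and only the first case originally broke out of the loop, so truncated input left p stuck forever. A minimal self-contained sketch of the required loop discipline (hypothetical decoder, not Dovecot's uni_utf8_get_char_n; continuation-byte validation elided):

#include <stddef.h>

/* Returns bytes consumed, 0 if the sequence is incomplete, -1 if invalid. */
static int utf8_len(const unsigned char *p, size_t avail, unsigned int *ch)
{
    size_t need = *p < 0x80 ? 1 :
                  (*p & 0xE0) == 0xC0 ? 2 :
                  (*p & 0xF0) == 0xE0 ? 3 :
                  (*p & 0xF8) == 0xF0 ? 4 : 0;
    if (need == 0)
        return -1;          /* invalid lead byte */
    if (avail < need)
        return 0;           /* truncated at end of buffer */
    *ch = *p;               /* full decoding elided; only the length matters here */
    return (int)need;
}

/* Returns 1 on a complete scan, 0 if more input is needed, -1 on bad input. */
static int scan(const unsigned char *p, const unsigned char *end)
{
    while (p < end) {
        unsigned int ch;
        int nch = utf8_len(p, (size_t)(end - p), &ch);
        if (nch == 0)
            return 0;       /* the case the fix adds: stop instead of spinning */
        if (nch < 0)
            return -1;
        p += nch;           /* nch >= 1 guarantees forward progress */
    }
    return 1;
}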
216,800
|
core
|
69ad3c902ea4bbf9f21ab1857d8923f975dc6145
|
https://github.com/dovecot/core
|
https://github.com/dovecot/core/commit/69ad3c902ea4bbf9f21ab1857d8923f975dc6145
|
auth: mech-rpa - Fail on zero len buffer
| 1
|
rpa_read_buffer(pool_t pool, const unsigned char **data,
const unsigned char *end, unsigned char **buffer)
{
const unsigned char *p = *data;
unsigned int len;
if (p > end)
return 0;
len = *p++;
if (p + len > end)
return 0;
*buffer = p_malloc(pool, len);
memcpy(*buffer, p, len);
*data += 1 + len;
return len;
}
|
29472874834578262075169131407750507339
|
None
|
CWE-125
|
CVE-2020-12674
|
In Dovecot before 2.3.11.3, sending a specially formatted RPA request will crash the auth service because a length of zero is mishandled.
|
https://nvd.nist.gov/vuln/detail/CVE-2020-12674
|
|
506,428
|
core
|
69ad3c902ea4bbf9f21ab1857d8923f975dc6145
|
https://github.com/dovecot/core
|
https://github.com/dovecot/core/commit/69ad3c902ea4bbf9f21ab1857d8923f975dc6145
|
auth: mech-rpa - Fail on zero len buffer
| 0
|
rpa_read_buffer(pool_t pool, const unsigned char **data,
const unsigned char *end, unsigned char **buffer)
{
const unsigned char *p = *data;
unsigned int len;
if (p > end)
return 0;
len = *p++;
if (p + len > end || len == 0)
return 0;
*buffer = p_malloc(pool, len);
memcpy(*buffer, p, len);
*data += 1 + len;
return len;
}
|
42033596058129329438262094419912462932
|
None
|
CWE-125
|
CVE-2020-12674
|
In Dovecot before 2.3.11.3, sending a specially formatted RPA request will crash the auth service because a length of zero is mishandled.
|
https://nvd.nist.gov/vuln/detail/CVE-2020-12674
|
|
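The RPA pair above is the classic length-prefixed-field check: 'p + len > end' stops overruns but still admits len == 0, handing later code a zero-byte buffer it assumes is non-empty. A minimal sketch of the hardened parse (hypothetical function; written with 'end - p' so no out-of-range pointer is ever formed):

#include <stdlib.h>
#include <string.h>

/* Reads a 1-byte-length-prefixed field; returns its length or 0 on error. */
static unsigned int read_field(const unsigned char **data,
                               const unsigned char *end,
                               unsigned char **buffer)
{
    const unsigned char *p = *data;
    unsigned int len;
    if (p >= end)                              /* need the length byte itself */
        return 0;
    len = *p++;
    if (len == 0 || (size_t)(end - p) < len)   /* reject empty and overlong */
        return 0;
    *buffer = malloc(len);
    if (*buffer == NULL)
        return 0;
    memcpy(*buffer, p, len);
    *data += 1 + len;
    return len;
}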
216,812
|
openssl
|
97ab3c4b538840037812c8d9164d09a1f4bf11a1
|
https://github.com/openssl/openssl
|
https://github.com/openssl/openssl/commit/97ab3c4b538840037812c8d9164d09a1f4bf11a1
|
Add a test for GENERAL_NAME_cmp
Based on a boringssl test contributed by David Benjamin
Reviewed-by: Tomas Mraz <[email protected]>
| 1
|
int setup_tests(void)
{
ADD_ALL_TESTS(call_run_cert, OSSL_NELEM(name_fns));
return 1;
}
|
99734093470067216462803129850987630150
|
None
|
CWE-476
|
CVE-2020-1971
|
The X.509 GeneralName type is a generic type for representing different types of names. One of those name types is known as EDIPartyName. OpenSSL provides a function GENERAL_NAME_cmp which compares different instances of a GENERAL_NAME to see if they are equal or not. This function behaves incorrectly when both GENERAL_NAMEs contain an EDIPARTYNAME. A NULL pointer dereference and a crash may occur leading to a possible denial of service attack. OpenSSL itself uses the GENERAL_NAME_cmp function for two purposes: 1) Comparing CRL distribution point names between an available CRL and a CRL distribution point embedded in an X509 certificate 2) When verifying that a timestamp response token signer matches the timestamp authority name (exposed via the API functions TS_RESP_verify_response and TS_RESP_verify_token) If an attacker can control both items being compared then that attacker could trigger a crash. For example if the attacker can trick a client or server into checking a malicious certificate against a malicious CRL then this may occur. Note that some applications automatically download CRLs based on a URL embedded in a certificate. This checking happens prior to the signatures on the certificate and CRL being verified. OpenSSL's s_server, s_client and verify tools have support for the "-crl_download" option which implements automatic CRL downloading and this attack has been demonstrated to work against those tools. Note that an unrelated bug means that affected versions of OpenSSL cannot parse or construct correct encodings of EDIPARTYNAME. However it is possible to construct a malformed EDIPARTYNAME that OpenSSL's parser will accept and hence trigger this attack. All OpenSSL 1.1.1 and 1.0.2 versions are affected by this issue. Other OpenSSL releases are out of support and have not been checked. Fixed in OpenSSL 1.1.1i (Affected 1.1.1-1.1.1h). Fixed in OpenSSL 1.0.2x (Affected 1.0.2-1.0.2w).
|
https://nvd.nist.gov/vuln/detail/CVE-2020-1971
|
|
506,696
|
openssl
|
97ab3c4b538840037812c8d9164d09a1f4bf11a1
|
https://github.com/openssl/openssl
|
https://github.com/openssl/openssl/commit/97ab3c4b538840037812c8d9164d09a1f4bf11a1
|
Add a test for GENERAL_NAME_cmp
Based on a boringssl test contributed by David Benjamin
Reviewed-by: Tomas Mraz <[email protected]>
| 0
|
int setup_tests(void)
{
ADD_ALL_TESTS(call_run_cert, OSSL_NELEM(name_fns));
ADD_TEST(test_GENERAL_NAME_cmp);
return 1;
}
|
300508003271249901349188890485879920788
|
None
|
CWE-476
|
CVE-2020-1971
|
The X.509 GeneralName type is a generic type for representing different types of names. One of those name types is known as EDIPartyName. OpenSSL provides a function GENERAL_NAME_cmp which compares different instances of a GENERAL_NAME to see if they are equal or not. This function behaves incorrectly when both GENERAL_NAMEs contain an EDIPARTYNAME. A NULL pointer dereference and a crash may occur leading to a possible denial of service attack. OpenSSL itself uses the GENERAL_NAME_cmp function for two purposes: 1) Comparing CRL distribution point names between an available CRL and a CRL distribution point embedded in an X509 certificate 2) When verifying that a timestamp response token signer matches the timestamp authority name (exposed via the API functions TS_RESP_verify_response and TS_RESP_verify_token) If an attacker can control both items being compared then that attacker could trigger a crash. For example if the attacker can trick a client or server into checking a malicious certificate against a malicious CRL then this may occur. Note that some applications automatically download CRLs based on a URL embedded in a certificate. This checking happens prior to the signatures on the certificate and CRL being verified. OpenSSL's s_server, s_client and verify tools have support for the "-crl_download" option which implements automatic CRL downloading and this attack has been demonstrated to work against those tools. Note that an unrelated bug means that affected versions of OpenSSL cannot parse or construct correct encodings of EDIPARTYNAME. However it is possible to construct a malformed EDIPARTYNAME that OpenSSL's parser will accept and hence trigger this attack. All OpenSSL 1.1.1 and 1.0.2 versions are affected by this issue. Other OpenSSL releases are out of support and have not been checked. Fixed in OpenSSL 1.1.1i (Affected 1.1.1-1.1.1h). Fixed in OpenSSL 1.0.2x (Affected 1.0.2-1.0.2w).
|
https://nvd.nist.gov/vuln/detail/CVE-2020-1971
|
|
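The function stored in this pair is only the test-registration hook; the underlying CVE is a comparator over a tagged union that dereferences optional members unconditionally. A minimal sketch of that bug class and the NULL-safe comparison it needs (hypothetical types, deliberately far simpler than OpenSSL's GENERAL_NAME; d.edi is assumed non-NULL for brevity):

#include <string.h>

struct edi_party { const char *assigner; const char *party; /* either may be NULL */ };
struct name {
    int type;                                    /* selects the live union member */
    union { struct edi_party *edi; const char *dns; } d;
};

/* Compare two optional strings without dereferencing NULL. */
static int cmp_opt_str(const char *a, const char *b)
{
    if (a == NULL || b == NULL)
        return (a == NULL && b == NULL) ? 0 : -1;
    return strcmp(a, b) == 0 ? 0 : -1;
}

static int name_cmp(const struct name *a, const struct name *b)
{
    if (a->type != b->type)
        return -1;
    switch (a->type) {
    case 1:  /* EDIPartyName-like variant: every member compared NULL-safely */
        return cmp_opt_str(a->d.edi->assigner, b->d.edi->assigner) == 0 &&
               cmp_opt_str(a->d.edi->party, b->d.edi->party) == 0 ? 0 : -1;
    default: /* string-like variants */
        return cmp_opt_str(a->d.dns, b->d.dns);
    }
}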
216,861
|
openssl
|
94d23fcff9b2a7a8368dfe52214d5c2569882c11
|
https://github.com/openssl/openssl
|
https://github.com/openssl/openssl/commit/94d23fcff9b2a7a8368dfe52214d5c2569882c11
|
Fix EC_GROUP_new_from_ecparameters to check the base length
Check that there's at least one byte in params->base before trying to
read it.
CVE-2021-3712
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
| 1
|
EC_GROUP *EC_GROUP_new_from_ecparameters(const ECPARAMETERS *params)
{
int ok = 0, tmp;
EC_GROUP *ret = NULL, *dup = NULL;
BIGNUM *p = NULL, *a = NULL, *b = NULL;
EC_POINT *point = NULL;
long field_bits;
int curve_name = NID_undef;
BN_CTX *ctx = NULL;
if (!params->fieldID || !params->fieldID->fieldType ||
!params->fieldID->p.ptr) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
/*
* Now extract the curve parameters a and b. Note that, although SEC 1
* specifies the length of their encodings, historical versions of OpenSSL
* encoded them incorrectly, so we must accept any length for backwards
* compatibility.
*/
if (!params->curve || !params->curve->a ||
!params->curve->a->data || !params->curve->b ||
!params->curve->b->data) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
a = BN_bin2bn(params->curve->a->data, params->curve->a->length, NULL);
if (a == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_BN_LIB);
goto err;
}
b = BN_bin2bn(params->curve->b->data, params->curve->b->length, NULL);
if (b == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_BN_LIB);
goto err;
}
/* get the field parameters */
tmp = OBJ_obj2nid(params->fieldID->fieldType);
if (tmp == NID_X9_62_characteristic_two_field)
#ifdef OPENSSL_NO_EC2M
{
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_GF2M_NOT_SUPPORTED);
goto err;
}
#else
{
X9_62_CHARACTERISTIC_TWO *char_two;
char_two = params->fieldID->p.char_two;
field_bits = char_two->m;
if (field_bits > OPENSSL_ECC_MAX_FIELD_BITS) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_FIELD_TOO_LARGE);
goto err;
}
if ((p = BN_new()) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_MALLOC_FAILURE);
goto err;
}
/* get the base type */
tmp = OBJ_obj2nid(char_two->type);
if (tmp == NID_X9_62_tpBasis) {
long tmp_long;
if (!char_two->p.tpBasis) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
tmp_long = ASN1_INTEGER_get(char_two->p.tpBasis);
if (!(char_two->m > tmp_long && tmp_long > 0)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS,
EC_R_INVALID_TRINOMIAL_BASIS);
goto err;
}
/* create the polynomial */
if (!BN_set_bit(p, (int)char_two->m))
goto err;
if (!BN_set_bit(p, (int)tmp_long))
goto err;
if (!BN_set_bit(p, 0))
goto err;
} else if (tmp == NID_X9_62_ppBasis) {
X9_62_PENTANOMIAL *penta;
penta = char_two->p.ppBasis;
if (!penta) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
if (!
(char_two->m > penta->k3 && penta->k3 > penta->k2
&& penta->k2 > penta->k1 && penta->k1 > 0)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS,
EC_R_INVALID_PENTANOMIAL_BASIS);
goto err;
}
/* create the polynomial */
if (!BN_set_bit(p, (int)char_two->m))
goto err;
if (!BN_set_bit(p, (int)penta->k1))
goto err;
if (!BN_set_bit(p, (int)penta->k2))
goto err;
if (!BN_set_bit(p, (int)penta->k3))
goto err;
if (!BN_set_bit(p, 0))
goto err;
} else if (tmp == NID_X9_62_onBasis) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_NOT_IMPLEMENTED);
goto err;
} else { /* error */
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
/* create the EC_GROUP structure */
ret = EC_GROUP_new_curve_GF2m(p, a, b, NULL);
}
#endif
else if (tmp == NID_X9_62_prime_field) {
/* we have a curve over a prime field */
/* extract the prime number */
if (!params->fieldID->p.prime) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
p = ASN1_INTEGER_to_BN(params->fieldID->p.prime, NULL);
if (p == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_ASN1_LIB);
goto err;
}
if (BN_is_negative(p) || BN_is_zero(p)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_INVALID_FIELD);
goto err;
}
field_bits = BN_num_bits(p);
if (field_bits > OPENSSL_ECC_MAX_FIELD_BITS) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_FIELD_TOO_LARGE);
goto err;
}
/* create the EC_GROUP structure */
ret = EC_GROUP_new_curve_GFp(p, a, b, NULL);
} else {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_INVALID_FIELD);
goto err;
}
if (ret == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
/* extract seed (optional) */
if (params->curve->seed != NULL) {
OPENSSL_free(ret->seed);
if ((ret->seed = OPENSSL_malloc(params->curve->seed->length)) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_MALLOC_FAILURE);
goto err;
}
memcpy(ret->seed, params->curve->seed->data,
params->curve->seed->length);
ret->seed_len = params->curve->seed->length;
}
if (!params->order || !params->base || !params->base->data) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
if ((point = EC_POINT_new(ret)) == NULL)
goto err;
/* set the point conversion form */
EC_GROUP_set_point_conversion_form(ret, (point_conversion_form_t)
(params->base->data[0] & ~0x01));
/* extract the ec point */
if (!EC_POINT_oct2point(ret, point, params->base->data,
params->base->length, NULL)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
/* extract the order */
if ((a = ASN1_INTEGER_to_BN(params->order, a)) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_ASN1_LIB);
goto err;
}
if (BN_is_negative(a) || BN_is_zero(a)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_INVALID_GROUP_ORDER);
goto err;
}
if (BN_num_bits(a) > (int)field_bits + 1) { /* Hasse bound */
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_INVALID_GROUP_ORDER);
goto err;
}
/* extract the cofactor (optional) */
if (params->cofactor == NULL) {
BN_free(b);
b = NULL;
} else if ((b = ASN1_INTEGER_to_BN(params->cofactor, b)) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_ASN1_LIB);
goto err;
}
/* set the generator, order and cofactor (if present) */
if (!EC_GROUP_set_generator(ret, point, a, b)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
/*
* Check if the explicit parameters group just created matches one of the
* built-in curves.
*
* We create a copy of the group just built, so that we can remove optional
* fields for the lookup: we do this to avoid the possibility that one of
* the optional parameters is used to force the library into using a less
* performant and less secure EC_METHOD instead of the specialized one.
* In any case, `seed` is not really used in any computation, while a
* cofactor different from the one in the built-in table is just
* mathematically wrong anyway and should not be used.
*/
if ((ctx = BN_CTX_new()) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_BN_LIB);
goto err;
}
if ((dup = EC_GROUP_dup(ret)) == NULL
|| EC_GROUP_set_seed(dup, NULL, 0) != 1
|| !EC_GROUP_set_generator(dup, point, a, NULL)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
if ((curve_name = ec_curve_nid_from_params(dup, ctx)) != NID_undef) {
/*
* The input explicit parameters successfully matched one of the
* built-in curves: often for built-in curves we have specialized
* methods with better performance and hardening.
*
* In this case we replace the `EC_GROUP` created through explicit
* parameters with one created from a named group.
*/
EC_GROUP *named_group = NULL;
#ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
/*
* NID_wap_wsg_idm_ecid_wtls12 and NID_secp224r1 are both aliases for
* the same curve, we prefer the SECP nid when matching explicit
* parameters as that is associated with a specialized EC_METHOD.
*/
if (curve_name == NID_wap_wsg_idm_ecid_wtls12)
curve_name = NID_secp224r1;
#endif /* !def(OPENSSL_NO_EC_NISTP_64_GCC_128) */
if ((named_group = EC_GROUP_new_by_curve_name(curve_name)) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
EC_GROUP_free(ret);
ret = named_group;
/*
* Set the flag so that EC_GROUPs created from explicit parameters are
* serialized using explicit parameters by default.
*/
EC_GROUP_set_asn1_flag(ret, OPENSSL_EC_EXPLICIT_CURVE);
/*
* If the input params do not contain the optional seed field we make
* sure it is not added to the returned group.
*
* The seed field is not really used inside libcrypto anyway, and
* adding it to parsed explicit parameter keys would alter their DER
* encoding output (because of the extra field) which could impact
* applications fingerprinting keys by their DER encoding.
*/
if (params->curve->seed == NULL) {
if (EC_GROUP_set_seed(ret, NULL, 0) != 1)
goto err;
}
}
ok = 1;
err:
if (!ok) {
EC_GROUP_free(ret);
ret = NULL;
}
EC_GROUP_free(dup);
BN_free(p);
BN_free(a);
BN_free(b);
EC_POINT_free(point);
BN_CTX_free(ctx);
return ret;
}
|
80540639516178112864890741931948685257
|
None
|
CWE-125
|
CVE-2021-3712
|
ASN.1 strings are represented internally within OpenSSL as an ASN1_STRING structure which contains a buffer holding the string data and a field holding the buffer length. This contrasts with normal C strings which are represented as a buffer for the string data which is terminated with a NUL (0) byte. Although not a strict requirement, ASN.1 strings that are parsed using OpenSSL's own "d2i" functions (and other similar parsing functions) as well as any string whose value has been set with the ASN1_STRING_set() function will additionally NUL terminate the byte array in the ASN1_STRING structure. However, it is possible for applications to directly construct valid ASN1_STRING structures which do not NUL terminate the byte array by directly setting the "data" and "length" fields in the ASN1_STRING array. This can also happen by using the ASN1_STRING_set0() function. Numerous OpenSSL functions that print ASN.1 data have been found to assume that the ASN1_STRING byte array will be NUL terminated, even though this is not guaranteed for strings that have been directly constructed. Where an application requests an ASN.1 structure to be printed, and where that ASN.1 structure contains ASN1_STRINGs that have been directly constructed by the application without NUL terminating the "data" field, then a read buffer overrun can occur. The same thing can also occur during name constraints processing of certificates (for example if a certificate has been directly constructed by the application instead of loading it via the OpenSSL parsing functions, and the certificate contains non NUL terminated ASN1_STRING structures). It can also occur in the X509_get1_email(), X509_REQ_get1_email() and X509_get1_ocsp() functions. If a malicious actor can cause an application to directly construct an ASN1_STRING and then process it through one of the affected OpenSSL functions then this issue could be hit. This might result in a crash (causing a Denial of Service attack). It could also result in the disclosure of private memory contents (such as private keys, or sensitive plaintext). Fixed in OpenSSL 1.1.1l (Affected 1.1.1-1.1.1k). Fixed in OpenSSL 1.0.2za (Affected 1.0.2-1.0.2y).
|
https://nvd.nist.gov/vuln/detail/CVE-2021-3712
|
|
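The fixed counterpart that follows extends the ASN.1 sanity check with params->base->length == 0, because the base field can be present, with a non-NULL data pointer, and still be empty; reading data[0] to pick the point-conversion form is then a one-byte out-of-bounds read. A minimal sketch of the pattern (hypothetical struct standing in for an ASN.1 OCTET STRING):

#include <stddef.h>

struct octet_string { const unsigned char *data; int length; };

/* Returns the point-conversion-form octet, or -1 if there is no byte to read. */
static int base_point_form(const struct octet_string *base)
{
    /* presence is not enough: require at least one byte before touching data[0] */
    if (base == NULL || base->data == NULL || base->length <= 0)
        return -1;
    return base->data[0] & ~0x01;
}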
507,778
|
openssl
|
94d23fcff9b2a7a8368dfe52214d5c2569882c11
|
https://github.com/openssl/openssl
|
https://github.com/openssl/openssl/commit/94d23fcff9b2a7a8368dfe52214d5c2569882c11
|
Fix EC_GROUP_new_from_ecparameters to check the base length
Check that there's at least one byte in params->base before trying to
read it.
CVE-2021-3712
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Paul Dale <[email protected]>
| 0
|
EC_GROUP *EC_GROUP_new_from_ecparameters(const ECPARAMETERS *params)
{
int ok = 0, tmp;
EC_GROUP *ret = NULL, *dup = NULL;
BIGNUM *p = NULL, *a = NULL, *b = NULL;
EC_POINT *point = NULL;
long field_bits;
int curve_name = NID_undef;
BN_CTX *ctx = NULL;
if (!params->fieldID || !params->fieldID->fieldType ||
!params->fieldID->p.ptr) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
/*
* Now extract the curve parameters a and b. Note that, although SEC 1
* specifies the length of their encodings, historical versions of OpenSSL
* encoded them incorrectly, so we must accept any length for backwards
* compatibility.
*/
if (!params->curve || !params->curve->a ||
!params->curve->a->data || !params->curve->b ||
!params->curve->b->data) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
a = BN_bin2bn(params->curve->a->data, params->curve->a->length, NULL);
if (a == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_BN_LIB);
goto err;
}
b = BN_bin2bn(params->curve->b->data, params->curve->b->length, NULL);
if (b == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_BN_LIB);
goto err;
}
/* get the field parameters */
tmp = OBJ_obj2nid(params->fieldID->fieldType);
if (tmp == NID_X9_62_characteristic_two_field)
#ifdef OPENSSL_NO_EC2M
{
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_GF2M_NOT_SUPPORTED);
goto err;
}
#else
{
X9_62_CHARACTERISTIC_TWO *char_two;
char_two = params->fieldID->p.char_two;
field_bits = char_two->m;
if (field_bits > OPENSSL_ECC_MAX_FIELD_BITS) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_FIELD_TOO_LARGE);
goto err;
}
if ((p = BN_new()) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_MALLOC_FAILURE);
goto err;
}
/* get the base type */
tmp = OBJ_obj2nid(char_two->type);
if (tmp == NID_X9_62_tpBasis) {
long tmp_long;
if (!char_two->p.tpBasis) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
tmp_long = ASN1_INTEGER_get(char_two->p.tpBasis);
if (!(char_two->m > tmp_long && tmp_long > 0)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS,
EC_R_INVALID_TRINOMIAL_BASIS);
goto err;
}
/* create the polynomial */
if (!BN_set_bit(p, (int)char_two->m))
goto err;
if (!BN_set_bit(p, (int)tmp_long))
goto err;
if (!BN_set_bit(p, 0))
goto err;
} else if (tmp == NID_X9_62_ppBasis) {
X9_62_PENTANOMIAL *penta;
penta = char_two->p.ppBasis;
if (!penta) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
if (!(char_two->m > penta->k3 && penta->k3 > penta->k2
      && penta->k2 > penta->k1 && penta->k1 > 0)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS,
EC_R_INVALID_PENTANOMIAL_BASIS);
goto err;
}
/* create the polynomial */
if (!BN_set_bit(p, (int)char_two->m))
goto err;
if (!BN_set_bit(p, (int)penta->k1))
goto err;
if (!BN_set_bit(p, (int)penta->k2))
goto err;
if (!BN_set_bit(p, (int)penta->k3))
goto err;
if (!BN_set_bit(p, 0))
goto err;
} else if (tmp == NID_X9_62_onBasis) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_NOT_IMPLEMENTED);
goto err;
} else { /* error */
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
/* create the EC_GROUP structure */
ret = EC_GROUP_new_curve_GF2m(p, a, b, NULL);
}
#endif
else if (tmp == NID_X9_62_prime_field) {
/* we have a curve over a prime field */
/* extract the prime number */
if (!params->fieldID->p.prime) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
p = ASN1_INTEGER_to_BN(params->fieldID->p.prime, NULL);
if (p == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_ASN1_LIB);
goto err;
}
if (BN_is_negative(p) || BN_is_zero(p)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_INVALID_FIELD);
goto err;
}
field_bits = BN_num_bits(p);
if (field_bits > OPENSSL_ECC_MAX_FIELD_BITS) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_FIELD_TOO_LARGE);
goto err;
}
/* create the EC_GROUP structure */
ret = EC_GROUP_new_curve_GFp(p, a, b, NULL);
} else {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_INVALID_FIELD);
goto err;
}
if (ret == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
/* extract seed (optional) */
if (params->curve->seed != NULL) {
OPENSSL_free(ret->seed);
if ((ret->seed = OPENSSL_malloc(params->curve->seed->length)) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_MALLOC_FAILURE);
goto err;
}
memcpy(ret->seed, params->curve->seed->data,
params->curve->seed->length);
ret->seed_len = params->curve->seed->length;
}
if (params->order == NULL
|| params->base == NULL
|| params->base->data == NULL
|| params->base->length == 0) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_ASN1_ERROR);
goto err;
}
if ((point = EC_POINT_new(ret)) == NULL)
goto err;
/* set the point conversion form */
EC_GROUP_set_point_conversion_form(ret, (point_conversion_form_t)
(params->base->data[0] & ~0x01));
/* extract the ec point */
if (!EC_POINT_oct2point(ret, point, params->base->data,
params->base->length, NULL)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
/* extract the order */
if ((a = ASN1_INTEGER_to_BN(params->order, a)) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_ASN1_LIB);
goto err;
}
if (BN_is_negative(a) || BN_is_zero(a)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_INVALID_GROUP_ORDER);
goto err;
}
if (BN_num_bits(a) > (int)field_bits + 1) { /* Hasse bound */
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, EC_R_INVALID_GROUP_ORDER);
goto err;
}
/* extract the cofactor (optional) */
if (params->cofactor == NULL) {
BN_free(b);
b = NULL;
} else if ((b = ASN1_INTEGER_to_BN(params->cofactor, b)) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_ASN1_LIB);
goto err;
}
/* set the generator, order and cofactor (if present) */
if (!EC_GROUP_set_generator(ret, point, a, b)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
/*
* Check if the explicit parameters group just created matches one of the
* built-in curves.
*
* We create a copy of the group just built, so that we can remove optional
* fields for the lookup: we do this to avoid the possibility that one of
* the optional parameters is used to force the library into using a less
* performant and less secure EC_METHOD instead of the specialized one.
* In any case, `seed` is not really used in any computation, while a
* cofactor different from the one in the built-in table is just
* mathematically wrong anyway and should not be used.
*/
if ((ctx = BN_CTX_new()) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_BN_LIB);
goto err;
}
if ((dup = EC_GROUP_dup(ret)) == NULL
|| EC_GROUP_set_seed(dup, NULL, 0) != 1
|| !EC_GROUP_set_generator(dup, point, a, NULL)) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
if ((curve_name = ec_curve_nid_from_params(dup, ctx)) != NID_undef) {
/*
* The input explicit parameters successfully matched one of the
* built-in curves: often for built-in curves we have specialized
* methods with better performance and hardening.
*
* In this case we replace the `EC_GROUP` created through explicit
* parameters with one created from a named group.
*/
EC_GROUP *named_group = NULL;
#ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
/*
* NID_wap_wsg_idm_ecid_wtls12 and NID_secp224r1 are both aliases for
* the same curve, we prefer the SECP nid when matching explicit
* parameters as that is associated with a specialized EC_METHOD.
*/
if (curve_name == NID_wap_wsg_idm_ecid_wtls12)
curve_name = NID_secp224r1;
#endif /* !def(OPENSSL_NO_EC_NISTP_64_GCC_128) */
if ((named_group = EC_GROUP_new_by_curve_name(curve_name)) == NULL) {
ECerr(EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS, ERR_R_EC_LIB);
goto err;
}
EC_GROUP_free(ret);
ret = named_group;
/*
* Set the flag so that EC_GROUPs created from explicit parameters are
* serialized using explicit parameters by default.
*/
EC_GROUP_set_asn1_flag(ret, OPENSSL_EC_EXPLICIT_CURVE);
/*
* If the input params do not contain the optional seed field we make
* sure it is not added to the returned group.
*
* The seed field is not really used inside libcrypto anyway, and
* adding it to parsed explicit parameter keys would alter their DER
* encoding output (because of the extra field) which could impact
* applications fingerprinting keys by their DER encoding.
*/
if (params->curve->seed == NULL) {
if (EC_GROUP_set_seed(ret, NULL, 0) != 1)
goto err;
}
}
ok = 1;
err:
if (!ok) {
EC_GROUP_free(ret);
ret = NULL;
}
EC_GROUP_free(dup);
BN_free(p);
BN_free(a);
BN_free(b);
EC_POINT_free(point);
BN_CTX_free(ctx);
return ret;
}
|
230738893451780669362858497865794698672
|
None
|
CWE-125
|
CVE-2021-3712
|
ASN.1 strings are represented internally within OpenSSL as an ASN1_STRING structure which contains a buffer holding the string data and a field holding the buffer length. This contrasts with normal C strings which are represented as a buffer for the string data which is terminated with a NUL (0) byte. Although not a strict requirement, ASN.1 strings that are parsed using OpenSSL's own "d2i" functions (and other similar parsing functions) as well as any string whose value has been set with the ASN1_STRING_set() function will additionally NUL terminate the byte array in the ASN1_STRING structure. However, it is possible for applications to directly construct valid ASN1_STRING structures which do not NUL terminate the byte array by directly setting the "data" and "length" fields in the ASN1_STRING structure. This can also happen by using the ASN1_STRING_set0() function. Numerous OpenSSL functions that print ASN.1 data have been found to assume that the ASN1_STRING byte array will be NUL terminated, even though this is not guaranteed for strings that have been directly constructed. Where an application requests an ASN.1 structure to be printed, and where that ASN.1 structure contains ASN1_STRINGs that have been directly constructed by the application without NUL terminating the "data" field, then a read buffer overrun can occur. The same thing can also occur during name constraints processing of certificates (for example if a certificate has been directly constructed by the application instead of loading it via the OpenSSL parsing functions, and the certificate contains non NUL terminated ASN1_STRING structures). It can also occur in the X509_get1_email(), X509_REQ_get1_email() and X509_get1_ocsp() functions. If a malicious actor can cause an application to directly construct an ASN1_STRING and then process it through one of the affected OpenSSL functions then this issue could be hit. This might result in a crash (causing a Denial of Service attack). It could also result in the disclosure of private memory contents (such as private keys, or sensitive plaintext). Fixed in OpenSSL 1.1.1l (Affected 1.1.1-1.1.1k). Fixed in OpenSSL 1.0.2za (Affected 1.0.2-1.0.2y).
|
https://nvd.nist.gov/vuln/detail/CVE-2021-3712
|
|
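The commit above boils down to one rule: never read params->base->data[0] until the pointer and the length have both been validated. A minimal standalone sketch of that pattern follows; none of these names are OpenSSL APIs, the struct merely mirrors a pointer-plus-length buffer such as an ASN1_STRING.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for an ASN.1 OCTET STRING: a pointer plus a
// length, with no guarantee of NUL termination.
struct enc_string { const unsigned char *data; std::size_t length; };

// Mirrors the fixed validation order: reject a missing buffer or a zero
// length before touching data[0].
static bool conversion_form(const enc_string *base, int *out_form)
{
    if (base == nullptr || base->data == nullptr || base->length == 0)
        return false;                    // the commit's EC_R_ASN1_ERROR path
    *out_form = base->data[0] & ~0x01;   // now provably in bounds
    return true;
}

int main()
{
    enc_string empty{nullptr, 0};
    int form = 0;
    std::printf("%s\n", conversion_form(&empty, &form) ? "ok" : "rejected");
    return 0;
}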
216,903
|
server
|
3a52569499e2f0c4d1f25db1e81617a9d9755400
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/3a52569499e2f0c4d1f25db1e81617a9d9755400
|
MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294
The assertion failure was caused by this query
select /*id=1*/ from t1
where
col= ( select /*id=2*/ from ... where corr_cond1
union
select /*id=4*/ from ... where corr_cond2)
Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
the optimizer optimized away the correlation, making the select with id=4
uncorrelated.
However, since select with id=2 remained correlated, the execution had to
re-compute the whole UNION. When it tried to execute select with id=4, it
hit an assertion (join buffer already free'd).
This is because select with id=4 has freed its execution structures after
it has been executed once. The select is uncorrelated, so it did not expect
it would need to be executed a second time.
Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times.
| 1
|
bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
{
SELECT_LEX_UNIT *next_unit= NULL;
for (SELECT_LEX_UNIT *un= first_inner_unit();
un;
un= next_unit ? next_unit : un->next_unit())
{
Item_subselect *subquery_predicate= un->item;
next_unit= NULL;
if (subquery_predicate)
{
if (!subquery_predicate->fixed)
{
/*
This subquery was excluded as part of some expression so it is
invisible from all prepared expressions.
*/
next_unit= un->next_unit();
un->exclude_level();
if (next_unit)
continue;
break;
}
if (subquery_predicate->substype() == Item_subselect::IN_SUBS)
{
Item_in_subselect *in_subs= (Item_in_subselect*) subquery_predicate;
if (in_subs->is_jtbm_merged)
continue;
}
if (const_only && !subquery_predicate->const_item())
{
/* Skip non-constant subqueries if the caller asked so. */
continue;
}
bool empty_union_result= true;
bool is_correlated_unit= false;
bool first= true;
bool union_plan_saved= false;
/*
If the subquery is a UNION, optimize all the subqueries in the UNION. If
there is no UNION, then the loop will execute once for the subquery.
*/
for (SELECT_LEX *sl= un->first_select(); sl; sl= sl->next_select())
{
JOIN *inner_join= sl->join;
if (first)
first= false;
else
{
if (!union_plan_saved)
{
union_plan_saved= true;
if (un->save_union_explain(un->thd->lex->explain))
return true; /* Failure */
}
}
if (!inner_join)
continue;
SELECT_LEX *save_select= un->thd->lex->current_select;
ulonglong save_options;
int res;
/* We need only 1 row to determine existence */
un->set_limit(un->global_parameters());
un->thd->lex->current_select= sl;
save_options= inner_join->select_options;
if (options & SELECT_DESCRIBE)
{
/* Optimize the subquery in the context of EXPLAIN. */
sl->set_explain_type(FALSE);
sl->options|= SELECT_DESCRIBE;
inner_join->select_options|= SELECT_DESCRIBE;
}
if ((res= inner_join->optimize()))
return TRUE;
if (!inner_join->cleaned)
sl->update_used_tables();
sl->update_correlated_cache();
is_correlated_unit|= sl->is_correlated;
inner_join->select_options= save_options;
un->thd->lex->current_select= save_select;
Explain_query *eq;
if ((eq= inner_join->thd->lex->explain))
{
Explain_select *expl_sel;
if ((expl_sel= eq->get_select(inner_join->select_lex->select_number)))
{
sl->set_explain_type(TRUE);
expl_sel->select_type= sl->type;
}
}
if (empty_union_result)
{
/*
If at least one subquery in a union is non-empty, the UNION result
is non-empty. If there is no UNION, the only subquery is non-empty.
*/
empty_union_result= inner_join->empty_result();
}
if (res)
return TRUE;
}
if (empty_union_result)
subquery_predicate->no_rows_in_result();
if (!is_correlated_unit)
un->uncacheable&= ~UNCACHEABLE_DEPENDENT;
subquery_predicate->is_correlated= is_correlated_unit;
}
}
return FALSE;
}
|
170977446789514176182113064369700449502
|
None
|
CWE-476
|
CVE-2021-46664
|
MariaDB through 10.5.9 allows an application crash in sub_select_postjoin_aggr for a NULL value of aggr.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-46664
|
|
508,874
|
server
|
3a52569499e2f0c4d1f25db1e81617a9d9755400
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/3a52569499e2f0c4d1f25db1e81617a9d9755400
|
MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294
The assertion failure was caused by this query
select /*id=1*/ from t1
where
col= ( select /*id=2*/ from ... where corr_cond1
union
select /*id=4*/ from ... where corr_cond2)
Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
the optimizer optimized away the correlation, making the select with id=4
uncorrelated.
However, since select with id=2 remained correlated, the execution had to
re-compute the whole UNION. When it tried to execute select with id=4, it
hit an assertion (join buffer already free'd).
This is because select with id=4 has freed its execution structures after
it has been executed once. The select is uncorrelated, so it did not expect
it would need to be executed a second time.
Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times.
| 0
|
bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
{
SELECT_LEX_UNIT *next_unit= NULL;
for (SELECT_LEX_UNIT *un= first_inner_unit();
un;
un= next_unit ? next_unit : un->next_unit())
{
Item_subselect *subquery_predicate= un->item;
next_unit= NULL;
if (subquery_predicate)
{
if (!subquery_predicate->fixed)
{
/*
This subquery was excluded as part of some expression so it is
invisible from all prepared expressions.
*/
next_unit= un->next_unit();
un->exclude_level();
if (next_unit)
continue;
break;
}
if (subquery_predicate->substype() == Item_subselect::IN_SUBS)
{
Item_in_subselect *in_subs= (Item_in_subselect*) subquery_predicate;
if (in_subs->is_jtbm_merged)
continue;
}
if (const_only && !subquery_predicate->const_item())
{
/* Skip non-constant subqueries if the caller asked so. */
continue;
}
bool empty_union_result= true;
bool is_correlated_unit= false;
bool first= true;
bool union_plan_saved= false;
/*
If the subquery is a UNION, optimize all the subqueries in the UNION. If
there is no UNION, then the loop will execute once for the subquery.
*/
for (SELECT_LEX *sl= un->first_select(); sl; sl= sl->next_select())
{
JOIN *inner_join= sl->join;
if (first)
first= false;
else
{
if (!union_plan_saved)
{
union_plan_saved= true;
if (un->save_union_explain(un->thd->lex->explain))
return true; /* Failure */
}
}
if (!inner_join)
continue;
SELECT_LEX *save_select= un->thd->lex->current_select;
ulonglong save_options;
int res;
/* We need only 1 row to determine existence */
un->set_limit(un->global_parameters());
un->thd->lex->current_select= sl;
save_options= inner_join->select_options;
if (options & SELECT_DESCRIBE)
{
/* Optimize the subquery in the context of EXPLAIN. */
sl->set_explain_type(FALSE);
sl->options|= SELECT_DESCRIBE;
inner_join->select_options|= SELECT_DESCRIBE;
}
if ((res= inner_join->optimize()))
return TRUE;
if (!inner_join->cleaned)
sl->update_used_tables();
sl->update_correlated_cache();
is_correlated_unit|= sl->is_correlated;
inner_join->select_options= save_options;
un->thd->lex->current_select= save_select;
Explain_query *eq;
if ((eq= inner_join->thd->lex->explain))
{
Explain_select *expl_sel;
if ((expl_sel= eq->get_select(inner_join->select_lex->select_number)))
{
sl->set_explain_type(TRUE);
expl_sel->select_type= sl->type;
}
}
if (empty_union_result)
{
/*
If at least one subquery in a union is non-empty, the UNION result
is non-empty. If there is no UNION, the only subquery is non-empty.
*/
empty_union_result= inner_join->empty_result();
}
if (res)
return TRUE;
}
if (empty_union_result)
subquery_predicate->no_rows_in_result();
if (is_correlated_unit)
{
/*
At least one member of this UNION is correlated, so the whole UNION
will be re-executed for every outer row. Mark all of its members as
correlated so that each one is prepared to be executed multiple times
(if we don't do that, an uncorrelated member may free its execution
data at the end of the first execution and crash on the second
execution)
*/
for (SELECT_LEX *sl= un->first_select(); sl; sl= sl->next_select())
sl->uncacheable |= UNCACHEABLE_DEPENDENT;
}
else
un->uncacheable&= ~UNCACHEABLE_DEPENDENT;
subquery_predicate->is_correlated= is_correlated_unit;
}
}
return FALSE;
}
|
335655854636733648424487878771712666992
|
None
|
CWE-476
|
CVE-2021-46664
|
MariaDB through 10.5.9 allows an application crash in sub_select_postjoin_aggr for a NULL value of aggr.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-46664
|
|
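The two records above differ only in the correlation-propagation block at the end. The sketch below is a simplified model of that fix, not MariaDB code; Select and the flag value are hypothetical stand-ins for SELECT_LEX and its uncacheable bitmap. If any member of the UNION is correlated, every member is marked dependent so that none of them frees its execution structures after the first run.

#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::uint8_t UNCACHEABLE_DEPENDENT = 1 << 0; // hypothetical flag value

struct Select {                     // stand-in for SELECT_LEX
    bool is_correlated = false;
    std::uint8_t uncacheable = 0;
};

// If one member of the UNION is correlated the whole unit re-executes,
// so every member must be marked dependent; otherwise the unit-level
// dependent flag can be cleared.
static void propagate_correlation(std::vector<Select> &members,
                                  std::uint8_t &unit_uncacheable)
{
    bool any = false;
    for (const Select &s : members)
        any = any || s.is_correlated;
    if (any)
        for (Select &s : members)
            s.uncacheable |= UNCACHEABLE_DEPENDENT;
    else
        unit_uncacheable &= ~UNCACHEABLE_DEPENDENT;
}

int main()
{
    std::vector<Select> members(2);
    members[0].is_correlated = true;          // like the select with id=2
    std::uint8_t unit = UNCACHEABLE_DEPENDENT;
    propagate_correlation(members, unit);
    std::cout << int(members[1].uncacheable) << '\n'; // 1: also marked dependent
    return 0;
}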
216,906
|
server
|
9e39d0ae44595dbd1570805d97c9c874778a6be8
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/9e39d0ae44595dbd1570805d97c9c874778a6be8
|
MDEV-25787 Bug report: crash on SELECT DISTINCT thousands_blob_fields
fix a debug assert to account for temp tables that were never opened
| 1
|
void ha_maria::drop_table(const char *name)
{
DBUG_ASSERT(file->s->temporary);
(void) ha_close();
(void) maria_delete_table_files(name, 1, MY_WME);
}
|
85612271673629964630565572694774133988
|
None
|
CWE-400
|
CVE-2021-46668
|
MariaDB through 10.5.9 allows an application crash via certain long SELECT DISTINCT statements that improperly interact with storage-engine resource limitations for temporary data structures.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-46668
|
|
509,510
|
server
|
9e39d0ae44595dbd1570805d97c9c874778a6be8
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/9e39d0ae44595dbd1570805d97c9c874778a6be8
|
MDEV-25787 Bug report: crash on SELECT DISTINCT thousands_blob_fields
fix a debug assert to account for temp tables that were never opened
| 0
|
void ha_maria::drop_table(const char *name)
{
DBUG_ASSERT(!file || file->s->temporary);
(void) ha_close();
(void) maria_delete_table_files(name, 1, MY_WME);
}
|
36457166293746144750064590645990572247
|
None
|
CWE-400
|
CVE-2021-46668
|
MariaDB through 10.5.9 allows an application crash via certain long SELECT DISTINCT statements that improperly interact with storage-engine resource limitations for temporary data structures.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-46668
|
|
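The one-line diff between the two records above is a common hardening pattern: an assertion that dereferences a pointer must first admit that the pointer can legitimately be null, here because the temp table may never have been opened. A tiny sketch with hypothetical types, not MariaDB code:

#include <cassert>

struct Share { bool temporary = true; };
struct Handler { Share *s = nullptr; };   // stand-in for ha_maria's `file`

// Null-safe form: `!file ||` short-circuits, so file->s is only read
// when the table was actually opened.
static void drop_table_checked(const Handler *file)
{
    assert(!file || file->s->temporary);
    // ... delete the on-disk files regardless of whether it was opened ...
}

int main()
{
    drop_table_checked(nullptr);          // fine with the guarded assert
    return 0;
}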
216,938
|
server
|
b3c3291f0b7c1623cb20663f7cf31b7f749768bc
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/b3c3291f0b7c1623cb20663f7cf31b7f749768bc
|
MDEV-24176 fixup: GCC -Wmaybe-uninitialized
| 1
|
bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx)
{
TABLE *table;
const char *key;
uint key_length;
const char *alias= table_list->alias.str;
uint flags= ot_ctx->get_flags();
MDL_ticket *mdl_ticket;
TABLE_SHARE *share;
uint gts_flags;
bool from_share= false;
#ifdef WITH_PARTITION_STORAGE_ENGINE
int part_names_error=0;
#endif
DBUG_ENTER("open_table");
/*
The table must not be opened already. The table can be pre-opened for
some statements if it is a temporary table.
open_temporary_table() must be used to open temporary tables.
*/
DBUG_ASSERT(!table_list->table);
/* an open table operation needs a lot of the stack space */
if (check_stack_overrun(thd, STACK_MIN_SIZE_FOR_OPEN, (uchar *)&alias))
DBUG_RETURN(TRUE);
if (!(flags & MYSQL_OPEN_IGNORE_KILLED) && thd->killed)
{
thd->send_kill_message();
DBUG_RETURN(TRUE);
}
/*
Check if we're trying to take a write lock in a read only transaction.
Note that we allow write locks on log tables as otherwise logging
to general/slow log would be disabled in read only transactions.
*/
if (table_list->mdl_request.is_write_lock_request() &&
thd->tx_read_only &&
!(flags & (MYSQL_LOCK_LOG_TABLE | MYSQL_OPEN_HAS_MDL_LOCK)))
{
my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0));
DBUG_RETURN(true);
}
if (!table_list->db.str)
{
my_error(ER_NO_DB_ERROR, MYF(0));
DBUG_RETURN(true);
}
key_length= get_table_def_key(table_list, &key);
/*
If we're in pre-locked or LOCK TABLES mode, let's try to find the
requested table in the list of pre-opened and locked tables. If the
table is not there, return an error - we can't open not pre-opened
tables in pre-locked/LOCK TABLES mode.
TODO: move this block into a separate function.
*/
if (thd->locked_tables_mode &&
! (flags & MYSQL_OPEN_GET_NEW_TABLE))
{ // Using table locks
TABLE *best_table= 0;
int best_distance= INT_MIN;
for (table=thd->open_tables; table ; table=table->next)
{
if (table->s->table_cache_key.length == key_length &&
!memcmp(table->s->table_cache_key.str, key, key_length))
{
if (!my_strcasecmp(system_charset_info, table->alias.c_ptr(), alias) &&
table->query_id != thd->query_id && /* skip tables already used */
(thd->locked_tables_mode == LTM_LOCK_TABLES ||
table->query_id == 0))
{
int distance= ((int) table->reginfo.lock_type -
(int) table_list->lock_type);
/*
Find a table that either has the exact lock type requested,
or has the best suitable lock. In case there is no locked
table that has an equal or higher lock than requested,
we use the closest matching lock to be able to produce an error
message about wrong lock mode on the table. The best_table
is changed if bd < 0 <= d or bd < d < 0 or 0 <= d < bd.
distance < 0 - No suitable lock found
distance > 0 - we have lock mode higher than we require
distance == 0 - we have lock mode exactly which we need
*/
if ((best_distance < 0 && distance > best_distance) ||
(distance >= 0 && distance < best_distance))
{
best_distance= distance;
best_table= table;
if (best_distance == 0)
{
/*
We have found a perfect match and can finish iterating
through open tables list. Check for table use conflict
between calling statement and SP/trigger is done in
lock_tables().
*/
break;
}
}
}
}
}
if (best_table)
{
table= best_table;
table->query_id= thd->query_id;
table->init(thd, table_list);
DBUG_PRINT("info",("Using locked table"));
#ifdef WITH_PARTITION_STORAGE_ENGINE
part_names_error= set_partitions_as_used(table_list, table);
#endif
goto reset;
}
if (is_locked_view(thd, table_list))
{
if (table_list->sequence)
{
my_error(ER_NOT_SEQUENCE, MYF(0), table_list->db.str, table_list->alias.str);
DBUG_RETURN(true);
}
DBUG_RETURN(FALSE); // VIEW
}
/*
No table in the locked tables list. In case of explicit LOCK TABLES
this can happen if a user did not include the table into the list.
In case of pre-locked mode locked tables list is generated automatically,
so we may only end up here if the table did not exist when
locked tables list was created.
*/
if (thd->locked_tables_mode == LTM_PRELOCKED)
my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db.str, table_list->alias.str);
else
my_error(ER_TABLE_NOT_LOCKED, MYF(0), alias);
DBUG_RETURN(TRUE);
}
/*
Non pre-locked/LOCK TABLES mode, and the table is not temporary.
This is the normal use case.
*/
if (! (flags & MYSQL_OPEN_HAS_MDL_LOCK))
{
/*
We are not under LOCK TABLES and going to acquire write-lock/
modify the base table. We need to acquire protection against
global read lock until end of this statement in order to have
this statement blocked by active FLUSH TABLES WITH READ LOCK.
We don't need to acquire this protection under LOCK TABLES as
such protection already acquired at LOCK TABLES time and
not released until UNLOCK TABLES.
We don't block statements which modify only temporary tables
as these tables are not preserved by any form of
backup which uses FLUSH TABLES WITH READ LOCK.
TODO: The fact that we sometimes acquire protection against
GRL only when we encounter table to be write-locked
slightly increases probability of deadlock.
This problem will be solved once Alik pushes his
temporary table refactoring patch and we can start
pre-acquiring metadata locks at the beginning of
open_tables() call.
*/
if (table_list->mdl_request.is_write_lock_request() &&
! (flags & (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
MYSQL_OPEN_FORCE_SHARED_MDL |
MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL |
MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK)) &&
! ot_ctx->has_protection_against_grl())
{
MDL_request protection_request;
MDL_deadlock_handler mdl_deadlock_handler(ot_ctx);
if (thd->global_read_lock.can_acquire_protection())
DBUG_RETURN(TRUE);
protection_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
MDL_STATEMENT);
/*
Install error handler which if possible will convert deadlock error
into request to back-off and restart process of opening tables.
*/
thd->push_internal_handler(&mdl_deadlock_handler);
bool result= thd->mdl_context.acquire_lock(&protection_request,
ot_ctx->get_timeout());
thd->pop_internal_handler();
if (result)
DBUG_RETURN(TRUE);
ot_ctx->set_has_protection_against_grl();
}
if (open_table_get_mdl_lock(thd, ot_ctx, &table_list->mdl_request,
flags, &mdl_ticket) ||
mdl_ticket == NULL)
{
DEBUG_SYNC(thd, "before_open_table_wait_refresh");
DBUG_RETURN(TRUE);
}
DEBUG_SYNC(thd, "after_open_table_mdl_shared");
}
else
{
/*
Grab reference to the MDL lock ticket that was acquired
by the caller.
*/
mdl_ticket= table_list->mdl_request.ticket;
}
if (table_list->open_strategy == TABLE_LIST::OPEN_IF_EXISTS)
{
if (!ha_table_exists(thd, &table_list->db, &table_list->table_name))
DBUG_RETURN(FALSE);
}
else if (table_list->open_strategy == TABLE_LIST::OPEN_STUB)
DBUG_RETURN(FALSE);
/* Table exists. Let us try to open it. */
if (table_list->i_s_requested_object & OPEN_TABLE_ONLY)
gts_flags= GTS_TABLE;
else if (table_list->i_s_requested_object & OPEN_VIEW_ONLY)
gts_flags= GTS_VIEW;
else
gts_flags= GTS_TABLE | GTS_VIEW;
retry_share:
share= tdc_acquire_share(thd, table_list, gts_flags, &table);
if (unlikely(!share))
{
/*
Hide "Table doesn't exist" errors if the table belongs to a view.
The check for thd->is_error() is necessary to not push an
unwanted error in case the error was already silenced.
@todo Rework the alternative ways to deal with ER_NO_SUCH_TABLE.
*/
if (thd->is_error())
{
if (table_list->parent_l)
{
thd->clear_error();
my_error(ER_WRONG_MRG_TABLE, MYF(0));
}
else if (table_list->belong_to_view)
{
TABLE_LIST *view= table_list->belong_to_view;
thd->clear_error();
my_error(ER_VIEW_INVALID, MYF(0),
view->view_db.str, view->view_name.str);
}
}
DBUG_RETURN(TRUE);
}
/*
Check if this TABLE_SHARE-object corresponds to a view. Note, that there is
no need to check TABLE_SHARE::tdc.flushed as we do for regular tables,
because view shares are always up to date.
*/
if (share->is_view)
{
/*
If parent_l of the table_list is non null then a merge table
has this view as child table, which is not supported.
*/
if (table_list->parent_l)
{
my_error(ER_WRONG_MRG_TABLE, MYF(0));
goto err_lock;
}
if (table_list->sequence)
{
my_error(ER_NOT_SEQUENCE, MYF(0), table_list->db.str,
table_list->alias.str);
goto err_lock;
}
/*
This table is a view. Validate its metadata version: in particular,
that it was a view when the statement was prepared.
*/
if (check_and_update_table_version(thd, table_list, share))
goto err_lock;
/* Open view */
if (mysql_make_view(thd, share, table_list, false))
goto err_lock;
/* TODO: Don't free this */
tdc_release_share(share);
DBUG_ASSERT(table_list->view);
DBUG_RETURN(FALSE);
}
#ifdef WITH_WSREP
if (!((flags & MYSQL_OPEN_IGNORE_FLUSH) ||
(thd->wsrep_applier)))
#else
if (!(flags & MYSQL_OPEN_IGNORE_FLUSH))
#endif
{
if (share->tdc->flushed)
{
DBUG_PRINT("info", ("Found old share version: %lld current: %lld",
share->tdc->version, tdc_refresh_version()));
/*
We already have an MDL lock. But we have encountered an old
version of table in the table definition cache which is possible
when someone changes the table version directly in the cache
without acquiring a metadata lock (e.g. this can happen during
"rolling" FLUSH TABLE(S)).
Release our reference to share, wait until old version of
share goes away and then try to get new version of table share.
*/
if (table)
tc_release_table(table);
else
tdc_release_share(share);
MDL_deadlock_handler mdl_deadlock_handler(ot_ctx);
bool wait_result;
thd->push_internal_handler(&mdl_deadlock_handler);
wait_result= tdc_wait_for_old_version(thd, table_list->db.str,
table_list->table_name.str,
ot_ctx->get_timeout(),
mdl_ticket->get_deadlock_weight());
thd->pop_internal_handler();
if (wait_result)
DBUG_RETURN(TRUE);
goto retry_share;
}
if (thd->open_tables && thd->open_tables->s->tdc->flushed)
{
/*
If the version changes while we're opening the tables,
we have to back off, close all the tables opened-so-far,
and try to reopen them. Note: refresh_version is currently
changed only during FLUSH TABLES.
*/
if (table)
tc_release_table(table);
else
tdc_release_share(share);
(void)ot_ctx->request_backoff_action(Open_table_context::OT_REOPEN_TABLES,
NULL);
DBUG_RETURN(TRUE);
}
}
if (table)
{
DBUG_ASSERT(table->file != NULL);
MYSQL_REBIND_TABLE(table->file);
#ifdef WITH_PARTITION_STORAGE_ENGINE
part_names_error= set_partitions_as_used(table_list, table);
#endif
}
else
{
enum open_frm_error error;
/* make a new table */
if (!(table=(TABLE*) my_malloc(sizeof(*table),MYF(MY_WME))))
goto err_lock;
error= open_table_from_share(thd, share, &table_list->alias,
HA_OPEN_KEYFILE | HA_TRY_READ_ONLY,
EXTRA_RECORD,
thd->open_options, table, FALSE,
IF_PARTITIONING(table_list->partition_names,0));
if (unlikely(error))
{
my_free(table);
if (error == OPEN_FRM_DISCOVER)
(void) ot_ctx->request_backoff_action(Open_table_context::OT_DISCOVER,
table_list);
else if (share->crashed)
{
if (!(flags & MYSQL_OPEN_IGNORE_REPAIR))
(void) ot_ctx->request_backoff_action(Open_table_context::OT_REPAIR,
table_list);
else
table_list->crashed= 1; /* Mark that table was crashed */
}
goto err_lock;
}
if (open_table_entry_fini(thd, share, table))
{
closefrm(table);
my_free(table);
goto err_lock;
}
/* Add table to the share's used tables list. */
tc_add_table(thd, table);
from_share= true;
}
table->mdl_ticket= mdl_ticket;
table->reginfo.lock_type=TL_READ; /* Assume read */
table->init(thd, table_list);
table->next= thd->open_tables; /* Link into simple list */
thd->set_open_tables(table);
reset:
/*
Check that there is no reference to a condition from an earlier query
(cf. Bug#58553).
*/
DBUG_ASSERT(table->file->pushed_cond == NULL);
table_list->updatable= 1; // It is not derived table nor non-updatable VIEW
table_list->table= table;
if (!from_share && table->vcol_fix_expr(thd))
goto err_lock;
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (unlikely(table->part_info))
{
/* Partitions specified were incorrect.*/
if (part_names_error)
{
table->file->print_error(part_names_error, MYF(0));
DBUG_RETURN(true);
}
}
else if (table_list->partition_names)
{
/* Don't allow PARTITION () clause on a nonpartitioned table */
my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0));
DBUG_RETURN(true);
}
#endif
if (table_list->sequence && table->s->table_type != TABLE_TYPE_SEQUENCE)
{
my_error(ER_NOT_SEQUENCE, MYF(0), table_list->db.str, table_list->alias.str);
DBUG_RETURN(true);
}
DBUG_RETURN(FALSE);
err_lock:
tdc_release_share(share);
DBUG_PRINT("exit", ("failed"));
DBUG_RETURN(TRUE);
}
|
336982129080685152460431545851496461461
|
None
|
CWE-416
|
CVE-2022-27376
|
MariaDB Server v10.6.5 and below was discovered to contain an use-after-free in the component Item_args::walk_arg, which is exploited via specially crafted SQL statements.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-27376
|
|
511,472
|
server
|
b3c3291f0b7c1623cb20663f7cf31b7f749768bc
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/b3c3291f0b7c1623cb20663f7cf31b7f749768bc
|
MDEV-24176 fixup: GCC -Wmaybe-uninitialized
| 0
|
bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx)
{
TABLE *table;
const char *key;
uint key_length;
const char *alias= table_list->alias.str;
uint flags= ot_ctx->get_flags();
MDL_ticket *mdl_ticket;
TABLE_SHARE *share;
uint gts_flags;
bool from_share= false;
#ifdef WITH_PARTITION_STORAGE_ENGINE
int part_names_error=0;
#endif
DBUG_ENTER("open_table");
/*
The table must not be opened already. The table can be pre-opened for
some statements if it is a temporary table.
open_temporary_table() must be used to open temporary tables.
*/
DBUG_ASSERT(!table_list->table);
/* an open table operation needs a lot of the stack space */
if (check_stack_overrun(thd, STACK_MIN_SIZE_FOR_OPEN, (uchar *)&alias))
DBUG_RETURN(TRUE);
if (!(flags & MYSQL_OPEN_IGNORE_KILLED) && thd->killed)
{
thd->send_kill_message();
DBUG_RETURN(TRUE);
}
/*
Check if we're trying to take a write lock in a read only transaction.
Note that we allow write locks on log tables as otherwise logging
to general/slow log would be disabled in read only transactions.
*/
if (table_list->mdl_request.is_write_lock_request() &&
thd->tx_read_only &&
!(flags & (MYSQL_LOCK_LOG_TABLE | MYSQL_OPEN_HAS_MDL_LOCK)))
{
my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0));
DBUG_RETURN(true);
}
if (!table_list->db.str)
{
my_error(ER_NO_DB_ERROR, MYF(0));
DBUG_RETURN(true);
}
key_length= get_table_def_key(table_list, &key);
/*
If we're in pre-locked or LOCK TABLES mode, let's try to find the
requested table in the list of pre-opened and locked tables. If the
table is not there, return an error - we can't open not pre-opened
tables in pre-locked/LOCK TABLES mode.
TODO: move this block into a separate function.
*/
if (thd->locked_tables_mode &&
! (flags & MYSQL_OPEN_GET_NEW_TABLE))
{ // Using table locks
TABLE *best_table= 0;
int best_distance= INT_MIN;
for (table=thd->open_tables; table ; table=table->next)
{
if (table->s->table_cache_key.length == key_length &&
!memcmp(table->s->table_cache_key.str, key, key_length))
{
if (!my_strcasecmp(system_charset_info, table->alias.c_ptr(), alias) &&
table->query_id != thd->query_id && /* skip tables already used */
(thd->locked_tables_mode == LTM_LOCK_TABLES ||
table->query_id == 0))
{
int distance= ((int) table->reginfo.lock_type -
(int) table_list->lock_type);
/*
Find a table that either has the exact lock type requested,
or has the best suitable lock. In case there is no locked
table that has an equal or higher lock than requested,
we use the closest matching lock to be able to produce an error
message about wrong lock mode on the table. The best_table
is changed if bd < 0 <= d or bd < d < 0 or 0 <= d < bd.
distance < 0 - No suitable lock found
distance > 0 - we have lock mode higher than we require
distance == 0 - we have lock mode exactly which we need
*/
if ((best_distance < 0 && distance > best_distance) ||
(distance >= 0 && distance < best_distance))
{
best_distance= distance;
best_table= table;
if (best_distance == 0)
{
/*
We have found a perfect match and can finish iterating
through open tables list. Check for table use conflict
between calling statement and SP/trigger is done in
lock_tables().
*/
break;
}
}
}
}
}
if (best_table)
{
table= best_table;
table->query_id= thd->query_id;
table->init(thd, table_list);
DBUG_PRINT("info",("Using locked table"));
#ifdef WITH_PARTITION_STORAGE_ENGINE
part_names_error= set_partitions_as_used(table_list, table);
#endif
goto reset;
}
if (is_locked_view(thd, table_list))
{
if (table_list->sequence)
{
my_error(ER_NOT_SEQUENCE, MYF(0), table_list->db.str, table_list->alias.str);
DBUG_RETURN(true);
}
DBUG_RETURN(FALSE); // VIEW
}
/*
No table in the locked tables list. In case of explicit LOCK TABLES
this can happen if a user did not include the table into the list.
In case of pre-locked mode locked tables list is generated automatically,
so we may only end up here if the table did not exist when
locked tables list was created.
*/
if (thd->locked_tables_mode == LTM_PRELOCKED)
my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db.str, table_list->alias.str);
else
my_error(ER_TABLE_NOT_LOCKED, MYF(0), alias);
DBUG_RETURN(TRUE);
}
/*
Non pre-locked/LOCK TABLES mode, and the table is not temporary.
This is the normal use case.
*/
if (! (flags & MYSQL_OPEN_HAS_MDL_LOCK))
{
/*
We are not under LOCK TABLES and going to acquire write-lock/
modify the base table. We need to acquire protection against
global read lock until end of this statement in order to have
this statement blocked by active FLUSH TABLES WITH READ LOCK.
We don't need to acquire this protection under LOCK TABLES as
such protection already acquired at LOCK TABLES time and
not released until UNLOCK TABLES.
We don't block statements which modify only temporary tables
as these tables are not preserved by any form of
backup which uses FLUSH TABLES WITH READ LOCK.
TODO: The fact that we sometimes acquire protection against
GRL only when we encounter table to be write-locked
slightly increases probability of deadlock.
This problem will be solved once Alik pushes his
temporary table refactoring patch and we can start
pre-acquiring metadata locks at the beginning of
open_tables() call.
*/
if (table_list->mdl_request.is_write_lock_request() &&
! (flags & (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
MYSQL_OPEN_FORCE_SHARED_MDL |
MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL |
MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK)) &&
! ot_ctx->has_protection_against_grl())
{
MDL_request protection_request;
MDL_deadlock_handler mdl_deadlock_handler(ot_ctx);
if (thd->global_read_lock.can_acquire_protection())
DBUG_RETURN(TRUE);
protection_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
MDL_STATEMENT);
/*
Install error handler which if possible will convert deadlock error
into request to back-off and restart process of opening tables.
*/
thd->push_internal_handler(&mdl_deadlock_handler);
bool result= thd->mdl_context.acquire_lock(&protection_request,
ot_ctx->get_timeout());
thd->pop_internal_handler();
if (result)
DBUG_RETURN(TRUE);
ot_ctx->set_has_protection_against_grl();
}
if (open_table_get_mdl_lock(thd, ot_ctx, &table_list->mdl_request,
flags, &mdl_ticket) ||
mdl_ticket == NULL)
{
DEBUG_SYNC(thd, "before_open_table_wait_refresh");
DBUG_RETURN(TRUE);
}
DEBUG_SYNC(thd, "after_open_table_mdl_shared");
}
else
{
/*
Grab reference to the MDL lock ticket that was acquired
by the caller.
*/
mdl_ticket= table_list->mdl_request.ticket;
}
if (table_list->open_strategy == TABLE_LIST::OPEN_IF_EXISTS)
{
if (!ha_table_exists(thd, &table_list->db, &table_list->table_name))
DBUG_RETURN(FALSE);
}
else if (table_list->open_strategy == TABLE_LIST::OPEN_STUB)
DBUG_RETURN(FALSE);
/* Table exists. Let us try to open it. */
if (table_list->i_s_requested_object & OPEN_TABLE_ONLY)
gts_flags= GTS_TABLE;
else if (table_list->i_s_requested_object & OPEN_VIEW_ONLY)
gts_flags= GTS_VIEW;
else
gts_flags= GTS_TABLE | GTS_VIEW;
retry_share:
share= tdc_acquire_share(thd, table_list, gts_flags, &table);
if (unlikely(!share))
{
/*
Hide "Table doesn't exist" errors if the table belongs to a view.
The check for thd->is_error() is necessary to not push an
unwanted error in case the error was already silenced.
@todo Rework the alternative ways to deal with ER_NO_SUCH_TABLE.
*/
if (thd->is_error())
{
if (table_list->parent_l)
{
thd->clear_error();
my_error(ER_WRONG_MRG_TABLE, MYF(0));
}
else if (table_list->belong_to_view)
{
TABLE_LIST *view= table_list->belong_to_view;
thd->clear_error();
my_error(ER_VIEW_INVALID, MYF(0),
view->view_db.str, view->view_name.str);
}
}
DBUG_RETURN(TRUE);
}
/*
Check if this TABLE_SHARE-object corresponds to a view. Note, that there is
no need to check TABLE_SHARE::tdc.flushed as we do for regular tables,
because view shares are always up to date.
*/
if (share->is_view)
{
/*
If parent_l of the table_list is non null then a merge table
has this view as child table, which is not supported.
*/
if (table_list->parent_l)
{
my_error(ER_WRONG_MRG_TABLE, MYF(0));
goto err_lock;
}
if (table_list->sequence)
{
my_error(ER_NOT_SEQUENCE, MYF(0), table_list->db.str,
table_list->alias.str);
goto err_lock;
}
/*
This table is a view. Validate its metadata version: in particular,
that it was a view when the statement was prepared.
*/
if (check_and_update_table_version(thd, table_list, share))
goto err_lock;
/* Open view */
if (mysql_make_view(thd, share, table_list, false))
goto err_lock;
/* TODO: Don't free this */
tdc_release_share(share);
DBUG_ASSERT(table_list->view);
DBUG_RETURN(FALSE);
}
#ifdef WITH_WSREP
if (!((flags & MYSQL_OPEN_IGNORE_FLUSH) ||
(thd->wsrep_applier)))
#else
if (!(flags & MYSQL_OPEN_IGNORE_FLUSH))
#endif
{
if (share->tdc->flushed)
{
DBUG_PRINT("info", ("Found old share version: %lld current: %lld",
share->tdc->version, tdc_refresh_version()));
/*
We already have an MDL lock. But we have encountered an old
version of table in the table definition cache which is possible
when someone changes the table version directly in the cache
without acquiring a metadata lock (e.g. this can happen during
"rolling" FLUSH TABLE(S)).
Release our reference to share, wait until old version of
share goes away and then try to get new version of table share.
*/
if (table)
tc_release_table(table);
else
tdc_release_share(share);
MDL_deadlock_handler mdl_deadlock_handler(ot_ctx);
bool wait_result;
thd->push_internal_handler(&mdl_deadlock_handler);
wait_result= tdc_wait_for_old_version(thd, table_list->db.str,
table_list->table_name.str,
ot_ctx->get_timeout(),
mdl_ticket->get_deadlock_weight());
thd->pop_internal_handler();
if (wait_result)
DBUG_RETURN(TRUE);
goto retry_share;
}
if (thd->open_tables && thd->open_tables->s->tdc->flushed)
{
/*
If the version changes while we're opening the tables,
we have to back off, close all the tables opened-so-far,
and try to reopen them. Note: refresh_version is currently
changed only during FLUSH TABLES.
*/
if (table)
tc_release_table(table);
else
tdc_release_share(share);
(void)ot_ctx->request_backoff_action(Open_table_context::OT_REOPEN_TABLES,
NULL);
DBUG_RETURN(TRUE);
}
}
if (table)
{
DBUG_ASSERT(table->file != NULL);
MYSQL_REBIND_TABLE(table->file);
#ifdef WITH_PARTITION_STORAGE_ENGINE
part_names_error= set_partitions_as_used(table_list, table);
#endif
}
else
{
enum open_frm_error error;
/* make a new table */
if (!(table=(TABLE*) my_malloc(sizeof(*table),MYF(MY_WME))))
goto err_lock;
error= open_table_from_share(thd, share, &table_list->alias,
HA_OPEN_KEYFILE | HA_TRY_READ_ONLY,
EXTRA_RECORD,
thd->open_options, table, FALSE,
IF_PARTITIONING(table_list->partition_names,0));
if (unlikely(error))
{
my_free(table);
if (error == OPEN_FRM_DISCOVER)
(void) ot_ctx->request_backoff_action(Open_table_context::OT_DISCOVER,
table_list);
else if (share->crashed)
{
if (!(flags & MYSQL_OPEN_IGNORE_REPAIR))
(void) ot_ctx->request_backoff_action(Open_table_context::OT_REPAIR,
table_list);
else
table_list->crashed= 1; /* Mark that table was crashed */
}
goto err_lock;
}
if (open_table_entry_fini(thd, share, table))
{
closefrm(table);
my_free(table);
goto err_lock;
}
/* Add table to the share's used tables list. */
tc_add_table(thd, table);
from_share= true;
}
table->mdl_ticket= mdl_ticket;
table->reginfo.lock_type=TL_READ; /* Assume read */
table->init(thd, table_list);
table->next= thd->open_tables; /* Link into simple list */
thd->set_open_tables(table);
reset:
/*
Check that there is no reference to a condition from an earlier query
(cf. Bug#58553).
*/
DBUG_ASSERT(table->file->pushed_cond == NULL);
table_list->updatable= 1; // It is not derived table nor non-updatable VIEW
table_list->table= table;
if (!from_share && table->vcol_fix_expr(thd))
DBUG_RETURN(true);
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (unlikely(table->part_info))
{
/* Partitions specified were incorrect.*/
if (part_names_error)
{
table->file->print_error(part_names_error, MYF(0));
DBUG_RETURN(true);
}
}
else if (table_list->partition_names)
{
/* Don't allow PARTITION () clause on a nonpartitioned table */
my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0));
DBUG_RETURN(true);
}
#endif
if (table_list->sequence && table->s->table_type != TABLE_TYPE_SEQUENCE)
{
my_error(ER_NOT_SEQUENCE, MYF(0), table_list->db.str, table_list->alias.str);
DBUG_RETURN(true);
}
DBUG_RETURN(FALSE);
err_lock:
tdc_release_share(share);
DBUG_PRINT("exit", ("failed"));
DBUG_RETURN(TRUE);
}
|
83128391821270233968176562120215675650
|
None
|
CWE-416
|
CVE-2022-27376
|
MariaDB Server v10.6.5 and below was discovered to contain an use-after-free in the component Item_args::walk_arg, which is exploited via specially crafted SQL statements.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-27376
|
|
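The only functional change between the two open_table() records above sits after the reset: label: the failure branch of vcol_fix_expr() now returns directly instead of jumping to err_lock. On the locked-tables path, control reaches reset: via `goto reset` without `share` ever being assigned, so err_lock's tdc_release_share(share) would release a share this path does not own, which is exactly what GCC's -Wmaybe-uninitialized flagged. A stripped-down sketch of the bug class, using plain malloc/free in place of the share cache (not MariaDB code):

#include <cstdlib>

// A shared cleanup label must only release what every path reaching it
// actually acquired. Here the fast path never allocates `share`, so the
// failure branch returns directly instead of jumping to a cleanup that
// unconditionally releases it.
static bool open_thing(bool fast_path, bool fail_late)
{
    char *share = nullptr;
    if (!fast_path) {
        share = static_cast<char *>(std::malloc(16)); // acquired only here
        if (share == nullptr)
            return true;
    }
    if (fail_late) {
        std::free(share);   // no-op when share == nullptr; never releases
        return true;        // an unowned or uninitialized pointer
    }
    std::free(share);
    return false;
}

int main()
{
    return open_thing(/*fast_path=*/true, /*fail_late=*/true) ? 0 : 1;
}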
216,945
|
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/807945f2eb5fa22e6f233cc17b85a2e141efe2c8
|
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
| 1
|
bool Item_equal::create_pushable_equalities(THD *thd,
List<Item> *equalities,
Pushdown_checker checker,
uchar *arg,
bool clone_const)
{
Item *item;
Item *left_item= NULL;
Item *right_item = get_const();
Item_equal_fields_iterator it(*this);
while ((item=it++))
{
left_item= item;
if (checker && !((item->*checker) (arg)))
continue;
break;
}
if (!left_item)
return false;
if (right_item)
{
Item_func_eq *eq= 0;
Item *left_item_clone= left_item->build_clone(thd);
Item *right_item_clone= !clone_const ?
right_item : right_item->build_clone(thd);
if (!left_item_clone || !right_item_clone)
return true;
eq= new (thd->mem_root) Item_func_eq(thd,
left_item_clone,
right_item_clone);
if (!eq || equalities->push_back(eq, thd->mem_root))
return true;
if (!clone_const)
right_item->set_extraction_flag(IMMUTABLE_FL);
}
while ((item=it++))
{
if (checker && !((item->*checker) (arg)))
continue;
Item_func_eq *eq= 0;
Item *left_item_clone= left_item->build_clone(thd);
Item *right_item_clone= item->build_clone(thd);
if (!(left_item_clone && right_item_clone))
return true;
left_item_clone->set_item_equal(NULL);
right_item_clone->set_item_equal(NULL);
eq= new (thd->mem_root) Item_func_eq(thd,
right_item_clone,
left_item_clone);
if (!eq || equalities->push_back(eq, thd->mem_root))
return true;
}
return false;
}
|
310506936609045112273577090786370872524
|
None
|
CWE-617
|
CVE-2022-27382
|
MariaDB Server v10.7 and below was discovered to contain a segmentation fault via the component Item_field::used_tables/update_depend_map_for_order.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-27382
|
|
512,825
|
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/807945f2eb5fa22e6f233cc17b85a2e141efe2c8
|
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
| 0
|
bool Item_equal::create_pushable_equalities(THD *thd,
List<Item> *equalities,
Pushdown_checker checker,
uchar *arg,
bool clone_const)
{
Item *item;
Item *left_item= NULL;
Item *right_item = get_const();
Item_equal_fields_iterator it(*this);
while ((item=it++))
{
left_item= item;
if (checker && !((item->*checker) (arg)))
continue;
break;
}
if (!left_item)
return false;
if (right_item)
{
Item_func_eq *eq= 0;
Item *left_item_clone= left_item->build_clone(thd);
Item *right_item_clone= !clone_const ?
right_item : right_item->build_clone(thd);
if (!left_item_clone || !right_item_clone)
return true;
eq= new (thd->mem_root) Item_func_eq(thd,
left_item_clone,
right_item_clone);
if (!eq || equalities->push_back(eq, thd->mem_root))
return true;
if (!clone_const)
{
/*
Also set IMMUTABLE_FL for any sub-items of the right_item.
This is needed to prevent Item::cleanup_excluding_immutables_processor
from performing cleanup of the sub-items and so creating an item tree
where a fixed item has non-fixed items inside it.
*/
int new_flag= IMMUTABLE_FL;
right_item->walk(&Item::set_extraction_flag_processor, false,
(void*)&new_flag);
}
}
while ((item=it++))
{
if (checker && !((item->*checker) (arg)))
continue;
Item_func_eq *eq= 0;
Item *left_item_clone= left_item->build_clone(thd);
Item *right_item_clone= item->build_clone(thd);
if (!(left_item_clone && right_item_clone))
return true;
left_item_clone->set_item_equal(NULL);
right_item_clone->set_item_equal(NULL);
eq= new (thd->mem_root) Item_func_eq(thd,
right_item_clone,
left_item_clone);
if (!eq || equalities->push_back(eq, thd->mem_root))
return true;
}
return false;
}
|
247600252359942672774915786547473250797
|
None
|
CWE-617
|
CVE-2022-27382
|
MariaDB Server v10.7 and below was discovered to contain a segmentation fault via the component Item_field::used_tables/update_depend_map_for_order.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-27382
|
|
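The fix above replaces a single set_extraction_flag(IMMUTABLE_FL) on the root with a walk that flags the whole subtree. The sketch below models why that matters; Node is a hypothetical stand-in for Item, and this is not MariaDB code. A bottom-up cleanup pass that honours the flag only on the root still un-fixes the children, producing exactly the fixed-parent-with-unfixed-child tree the commit message describes.

#include <functional>
#include <iostream>
#include <vector>

struct Node {                        // stand-in for Item
    bool immutable = false;
    bool fixed = true;
    std::vector<Node *> children;
    void walk(const std::function<void(Node *)> &f)  // bottom-up, like Item::walk
    {
        for (Node *c : children)
            c->walk(f);
        f(this);
    }
};

int main()
{
    Node leaf, root;
    root.children.push_back(&leaf);

    root.immutable = true;           // old behaviour: flag the root only
    auto cleanup = [](Node *n) { if (!n->immutable) n->fixed = false; };
    root.walk(cleanup);
    std::cout << root.fixed << leaf.fixed << '\n';    // "10": inconsistent tree

    leaf.fixed = true;
    root.walk([](Node *n) { n->immutable = true; });  // the fix: flag the subtree
    root.walk(cleanup);
    std::cout << root.fixed << leaf.fixed << '\n';    // "11": still consistent
    return 0;
}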
216,946
|
server
|
c05fd700970ad45735caed3a6f9930d4ce19a3bd
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/c05fd700970ad45735caed3a6f9930d4ce19a3bd
|
MDEV-26323 use-after-poison issue of MariaDB server
| 1
|
static void fix_dl_name(MEM_ROOT *root, LEX_STRING *dl)
{
const size_t so_ext_len= sizeof(SO_EXT) - 1;
if (my_strcasecmp(&my_charset_latin1, dl->str + dl->length - so_ext_len,
SO_EXT))
{
char *s= (char*)alloc_root(root, dl->length + so_ext_len + 1);
memcpy(s, dl->str, dl->length);
strcpy(s + dl->length, SO_EXT);
dl->str= s;
dl->length+= so_ext_len;
}
}
|
339917125862392009139749816960654857347
|
None
|
CWE-416
|
CVE-2022-27383
|
MariaDB Server v10.6 and below was discovered to contain an use-after-free in the component my_strcasecmp_8bit, which is exploited via specially crafted SQL statements.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-27383
|
|
513,202
|
server
|
c05fd700970ad45735caed3a6f9930d4ce19a3bd
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/c05fd700970ad45735caed3a6f9930d4ce19a3bd
|
MDEV-26323 use-after-poison issue of MariaDB server
| 0
|
static void fix_dl_name(MEM_ROOT *root, LEX_STRING *dl)
{
const size_t so_ext_len= sizeof(SO_EXT) - 1;
if (dl->length < so_ext_len ||
my_strcasecmp(&my_charset_latin1, dl->str + dl->length - so_ext_len,
SO_EXT))
{
char *s= (char*)alloc_root(root, dl->length + so_ext_len + 1);
memcpy(s, dl->str, dl->length);
strcpy(s + dl->length, SO_EXT);
dl->str= s;
dl->length+= so_ext_len;
}
}
|
98463444639733543200591125506342217809
|
None
|
CWE-416
|
CVE-2022-27383
|
MariaDB Server v10.6 and below was discovered to contain an use-after-free in the component my_strcasecmp_8bit, which is exploited via specially crafted SQL statements.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-27383
|
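The guard added above (dl->length < so_ext_len) prevents dl->str + dl->length - so_ext_len from pointing before the start of the buffer when the name is shorter than the ".so" suffix, which is what the use-after-poison report caught. A generic suffix check with the same shape, not MariaDB code:

#include <cstddef>
#include <cstdio>
#include <cstring>

// Check the length before forming `s + len - slen`: if len < slen the
// subtraction would produce an out-of-bounds pointer, and comparing
// through it reads before the buffer.
static bool has_suffix(const char *s, std::size_t len, const char *suffix)
{
    const std::size_t slen = std::strlen(suffix);
    if (len < slen)
        return false;                 // too short to carry the suffix
    return std::strncmp(s + len - slen, suffix, slen) == 0;
}

int main()
{
    std::printf("%d\n", has_suffix("x", 1, ".so"));      // 0, and no OOB read
    std::printf("%d\n", has_suffix("lib.so", 6, ".so")); // 1
    return 0;
}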
|
216,965
|
server
|
ecb6f9c894d3ebafeff1c6eb3b65cd248062296f
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/ecb6f9c894d3ebafeff1c6eb3b65cd248062296f
|
MDEV-28095 crash in multi-update and implicit grouping
disallow implicit grouping in multi-update.
explicit GROUP BY is not allowed by the grammar.
| 1
|
multi_update::initialize_tables(JOIN *join)
{
TABLE_LIST *table_ref;
DBUG_ENTER("initialize_tables");
if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
error_if_full_join(join)))
DBUG_RETURN(1);
main_table=join->join_tab->table;
table_to_update= 0;
/* Any update has at least one pair (field, value) */
DBUG_ASSERT(fields->elements);
/*
Only one table may be modified by UPDATE of an updatable view.
For an updatable view first_table_for_update indicates this
table.
For a regular multi-update it refers to some updated table.
*/
TABLE *first_table_for_update= ((Item_field *) fields->head())->field->table;
/* Create a temporary table for keys to all tables, except main table */
for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
{
TABLE *table=table_ref->table;
uint cnt= table_ref->shared;
List<Item> temp_fields;
ORDER group;
TMP_TABLE_PARAM *tmp_param;
if (ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (table == main_table) // First table in join
{
if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
{
table_to_update= table; // Update table on the fly
has_vers_fields= table->vers_check_update(*fields);
continue;
}
}
table->prepare_for_position();
join->map2table[table->tablenr]->keep_current_rowid= true;
/*
enable uncacheable flag if we update a view with check option
and check option has a subselect, otherwise, the check option
can be evaluated after the subselect was freed as independent
(See full_local in JOIN::join_free()).
*/
if (table_ref->check_option && !join->select_lex->uncacheable)
{
SELECT_LEX_UNIT *tmp_unit;
SELECT_LEX *sl;
for (tmp_unit= join->select_lex->first_inner_unit();
tmp_unit;
tmp_unit= tmp_unit->next_unit())
{
for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
{
if (sl->master_unit()->item)
{
join->select_lex->uncacheable|= UNCACHEABLE_CHECKOPTION;
goto loop_end;
}
}
}
}
loop_end:
if (table == first_table_for_update && table_ref->check_option)
{
table_map unupdated_tables= table_ref->check_option->used_tables() &
~first_table_for_update->map;
List_iterator<TABLE_LIST> ti(*leaves);
TABLE_LIST *tbl_ref;
while ((tbl_ref= ti++) && unupdated_tables)
{
if (unupdated_tables & tbl_ref->table->map)
unupdated_tables&= ~tbl_ref->table->map;
else
continue;
if (unupdated_check_opt_tables.push_back(tbl_ref->table))
DBUG_RETURN(1);
}
}
tmp_param= tmp_table_param+cnt;
/*
Create a temporary table to store all fields that are changed for this
table. The first field in the temporary table is a pointer to the
original row so that we can find and update it. For the updatable
VIEW a few following fields are rowids of tables used in the CHECK
OPTION condition.
*/
List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
TABLE *tbl= table;
do
{
LEX_CSTRING field_name;
field_name.str= tbl->alias.c_ptr();
field_name.length= strlen(field_name.str);
/*
Signal each table (including tables referenced by WITH CHECK OPTION
clause) for which we will store row position in the temporary table
that we need a position to be read first.
*/
tbl->prepare_for_position();
join->map2table[tbl->tablenr]->keep_current_rowid= true;
Item_temptable_rowid *item=
new (thd->mem_root) Item_temptable_rowid(tbl);
if (!item)
DBUG_RETURN(1);
item->fix_fields(thd, 0);
if (temp_fields.push_back(item, thd->mem_root))
DBUG_RETURN(1);
} while ((tbl= tbl_it++));
temp_fields.append(fields_for_table[cnt]);
/* Make an unique key over the first field to avoid duplicated updates */
bzero((char*) &group, sizeof(group));
group.direction= ORDER::ORDER_ASC;
group.item= (Item**) temp_fields.head_ref();
tmp_param->quick_group= 1;
tmp_param->field_count= temp_fields.elements;
tmp_param->func_count= temp_fields.elements - 1;
calc_group_buffer(tmp_param, &group);
/* small table, ignore SQL_BIG_TABLES */
my_bool save_big_tables= thd->variables.big_tables;
thd->variables.big_tables= FALSE;
tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields,
(ORDER*) &group, 0, 0,
TMP_TABLE_ALL_COLUMNS, HA_POS_ERROR, &empty_clex_str);
thd->variables.big_tables= save_big_tables;
if (!tmp_tables[cnt])
DBUG_RETURN(1);
tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
}
join->tmp_table_keep_current_rowid= TRUE;
DBUG_RETURN(0);
}
|
133589877756276645137859301197732446596
|
None
|
CWE-617
|
CVE-2022-27448
|
There is an Assertion failure in MariaDB Server v10.9 and below via 'node->pcur->rel_pos == BTR_PCUR_ON' at /row/row0mysql.cc.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-27448
|
|
514,292
|
server
|
ecb6f9c894d3ebafeff1c6eb3b65cd248062296f
|
https://github.com/MariaDB/server
|
https://github.com/MariaDB/server/commit/ecb6f9c894d3ebafeff1c6eb3b65cd248062296f
|
MDEV-28095 crash in multi-update and implicit grouping
disallow implicit grouping in multi-update.
explicit GROUP BY is not allowed by the grammar.
| 0
|
multi_update::initialize_tables(JOIN *join)
{
TABLE_LIST *table_ref;
DBUG_ENTER("initialize_tables");
if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
error_if_full_join(join)))
DBUG_RETURN(1);
if (join->implicit_grouping)
{
my_error(ER_INVALID_GROUP_FUNC_USE, MYF(0));
DBUG_RETURN(1);
}
main_table=join->join_tab->table;
table_to_update= 0;
/* Any update has at least one pair (field, value) */
DBUG_ASSERT(fields->elements);
/*
Only one table may be modified by UPDATE of an updatable view.
For an updatable view first_table_for_update indicates this
table.
For a regular multi-update it refers to some updated table.
*/
TABLE *first_table_for_update= ((Item_field *) fields->head())->field->table;
/* Create a temporary table for keys to all tables, except main table */
for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
{
TABLE *table=table_ref->table;
uint cnt= table_ref->shared;
List<Item> temp_fields;
ORDER group;
TMP_TABLE_PARAM *tmp_param;
if (ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (table == main_table) // First table in join
{
if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
{
table_to_update= table; // Update table on the fly
has_vers_fields= table->vers_check_update(*fields);
continue;
}
}
table->prepare_for_position();
join->map2table[table->tablenr]->keep_current_rowid= true;
/*
enable uncacheable flag if we update a view with check option
and check option has a subselect, otherwise, the check option
can be evaluated after the subselect was freed as independent
(See full_local in JOIN::join_free()).
*/
if (table_ref->check_option && !join->select_lex->uncacheable)
{
SELECT_LEX_UNIT *tmp_unit;
SELECT_LEX *sl;
for (tmp_unit= join->select_lex->first_inner_unit();
tmp_unit;
tmp_unit= tmp_unit->next_unit())
{
for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
{
if (sl->master_unit()->item)
{
join->select_lex->uncacheable|= UNCACHEABLE_CHECKOPTION;
goto loop_end;
}
}
}
}
loop_end:
if (table == first_table_for_update && table_ref->check_option)
{
table_map unupdated_tables= table_ref->check_option->used_tables() &
~first_table_for_update->map;
List_iterator<TABLE_LIST> ti(*leaves);
TABLE_LIST *tbl_ref;
while ((tbl_ref= ti++) && unupdated_tables)
{
if (unupdated_tables & tbl_ref->table->map)
unupdated_tables&= ~tbl_ref->table->map;
else
continue;
if (unupdated_check_opt_tables.push_back(tbl_ref->table))
DBUG_RETURN(1);
}
}
tmp_param= tmp_table_param+cnt;
/*
Create a temporary table to store all fields that are changed for this
table. The first field in the temporary table is a pointer to the
original row so that we can find and update it. For the updatable
VIEW a few following fields are rowids of tables used in the CHECK
OPTION condition.
*/
List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
TABLE *tbl= table;
do
{
LEX_CSTRING field_name;
field_name.str= tbl->alias.c_ptr();
field_name.length= strlen(field_name.str);
/*
Signal each table (including tables referenced by WITH CHECK OPTION
clause) for which we will store row position in the temporary table
that we need a position to be read first.
*/
tbl->prepare_for_position();
join->map2table[tbl->tablenr]->keep_current_rowid= true;
Item_temptable_rowid *item=
new (thd->mem_root) Item_temptable_rowid(tbl);
if (!item)
DBUG_RETURN(1);
item->fix_fields(thd, 0);
if (temp_fields.push_back(item, thd->mem_root))
DBUG_RETURN(1);
} while ((tbl= tbl_it++));
temp_fields.append(fields_for_table[cnt]);
/* Make an unique key over the first field to avoid duplicated updates */
bzero((char*) &group, sizeof(group));
group.direction= ORDER::ORDER_ASC;
group.item= (Item**) temp_fields.head_ref();
tmp_param->quick_group= 1;
tmp_param->field_count= temp_fields.elements;
tmp_param->func_count= temp_fields.elements - 1;
calc_group_buffer(tmp_param, &group);
/* small table, ignore SQL_BIG_TABLES */
my_bool save_big_tables= thd->variables.big_tables;
thd->variables.big_tables= FALSE;
tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields,
(ORDER*) &group, 0, 0,
TMP_TABLE_ALL_COLUMNS, HA_POS_ERROR, &empty_clex_str);
thd->variables.big_tables= save_big_tables;
if (!tmp_tables[cnt])
DBUG_RETURN(1);
tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
}
join->tmp_table_keep_current_rowid= TRUE;
DBUG_RETURN(0);
}
|
64239982738333152415398593981285799300
|
None
|
CWE-617
|
CVE-2022-27448
|
There is an Assertion failure in MariaDB Server v10.9 and below via 'node->pcur->rel_pos == BTR_PCUR_ON' at /row/row0mysql.cc.
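The only change between this record and the vulnerable one above is the guard at the top of the fixed function: when a statement uses an aggregate without GROUP BY, the optimizer sets JOIN::implicit_grouping, and the fix refuses the multi-update outright (explicit GROUP BY is already rejected by the grammar, per the commit message). A standalone model of that early rejection follows; the struct and function names are hypothetical stand-ins, since the server internals are not shown in this record.

#include <stdio.h>

/* Hypothetical stand-in for the server's JOIN state. */
struct join_state {
    int implicit_grouping;  /* set when an aggregate appears without GROUP BY */
};

/* Mirrors the guard added in the fixed function: refuse the multi-update
   before any temporary tables are created or rows are modified. */
static int multi_update_allowed(const struct join_state *join)
{
    if (join->implicit_grouping) {
        fprintf(stderr, "Invalid use of group function\n");
        return 0;   /* caller returns an error; nothing is updated */
    }
    return 1;
}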
|
https://nvd.nist.gov/vuln/detail/CVE-2022-27448
|
|
217,176
|
monit
|
328f60773057641c4b2075fab9820145e95b728c
|
https://bitbucket.org/tildeslash/monit
|
https://bitbucket.org/tildeslash/monit/commits/328f60773057641c4b2075fab9820145e95b728c
|
Fixed: HTML escape the log file content when viewed via Monit GUI.
| 1
|
static void do_viewlog(HttpRequest req, HttpResponse res) {
if (is_readonly(req)) {
send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page");
return;
}
do_head(res, "_viewlog", "View log", 100);
if ((Run.flags & Run_Log) && ! (Run.flags & Run_UseSyslog)) {
FILE *f = fopen(Run.files.log, "r");
if (f) {
size_t n;
char buf[512];
StringBuffer_append(res->outputbuffer, "<br><p><form><textarea cols=120 rows=30 readonly>");
while ((n = fread(buf, sizeof(char), sizeof(buf) - 1, f)) > 0) {
buf[n] = 0;
StringBuffer_append(res->outputbuffer, "%s", buf);
}
fclose(f);
StringBuffer_append(res->outputbuffer, "</textarea></form>");
} else {
StringBuffer_append(res->outputbuffer, "Error opening logfile: %s", STRERROR);
}
} else {
StringBuffer_append(res->outputbuffer,
"<b>Cannot view logfile:</b><br>");
if (! (Run.flags & Run_Log))
StringBuffer_append(res->outputbuffer, "Monit was started without logging");
else
StringBuffer_append(res->outputbuffer, "Monit uses syslog");
}
do_foot(res);
}
|
11066915327345888296428446172682700897
|
None
|
CWE-79
|
CVE-2019-11454
|
Persistent cross-site scripting (XSS) in http/cervlet.c in Tildeslash Monit before 5.25.3 allows a remote unauthenticated attacker to introduce arbitrary JavaScript via manipulation of an unsanitized user field of the Authorization header for HTTP Basic Authentication, which is mishandled during an _viewlog operation.
|
https://nvd.nist.gov/vuln/detail/CVE-2019-11454
|
|
517,437
|
monit
|
328f60773057641c4b2075fab9820145e95b728c
|
https://bitbucket.org/tildeslash/monit
|
https://bitbucket.org/tildeslash/monit/commits/328f60773057641c4b2075fab9820145e95b728c
|
Fixed: HTML escape the log file content when viewed via Monit GUI.
| 0
|
static void do_viewlog(HttpRequest req, HttpResponse res) {
if (is_readonly(req)) {
send_error(req, res, SC_FORBIDDEN, "You do not have sufficient privileges to access this page");
return;
}
do_head(res, "_viewlog", "View log", 100);
if ((Run.flags & Run_Log) && ! (Run.flags & Run_UseSyslog)) {
FILE *f = fopen(Run.files.log, "r");
if (f) {
size_t n;
char buf[512];
StringBuffer_append(res->outputbuffer, "<br><p><form><textarea cols=120 rows=30 readonly>");
while ((n = fread(buf, sizeof(char), sizeof(buf) - 1, f)) > 0) {
buf[n] = 0;
escapeHTML(res->outputbuffer, buf);
}
fclose(f);
StringBuffer_append(res->outputbuffer, "</textarea></form>");
} else {
StringBuffer_append(res->outputbuffer, "Error opening logfile: %s", STRERROR);
}
} else {
StringBuffer_append(res->outputbuffer,
"<b>Cannot view logfile:</b><br>");
if (! (Run.flags & Run_Log))
StringBuffer_append(res->outputbuffer, "Monit was started without logging");
else
StringBuffer_append(res->outputbuffer, "Monit uses syslog");
}
do_foot(res);
}
|
289779635859944473579386698211739203129
|
None
|
CWE-79
|
CVE-2019-11454
|
Persistent cross-site scripting (XSS) in http/cervlet.c in Tildeslash Monit before 5.25.3 allows a remote unauthenticated attacker to introduce arbitrary JavaScript via manipulation of an unsanitized user field of the Authorization header for HTTP Basic Authentication, which is mishandled during an _viewlog operation.
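The one-line difference between the two records is the sink: the raw StringBuffer_append(res->outputbuffer, "%s", buf) becomes escapeHTML(res->outputbuffer, buf). A minimal sketch of such an escaping step follows; it assumes nothing about Monit's actual escapeHTML() beyond its effect, and writes to a plain FILE * rather than a StringBuffer_T to stay self-contained. The idea: HTML metacharacters in each log chunk are rewritten before the chunk lands inside the <textarea>, so markup smuggled into the log (for example via the Authorization user name) is displayed as text instead of being interpreted by the browser.

#include <stdio.h>

/* Illustrative escaping helper, not Monit's implementation. */
static void escape_html_chunk(FILE *out, const char *s)
{
    for (; *s; s++) {
        switch (*s) {
            case '<':  fputs("&lt;", out);   break;
            case '>':  fputs("&gt;", out);   break;
            case '&':  fputs("&amp;", out);  break;
            case '"':  fputs("&quot;", out); break;
            default:   fputc(*s, out);       break;
        }
    }
}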
|
https://nvd.nist.gov/vuln/detail/CVE-2019-11454
|
|
217,459
|
JUCE
|
2e874e80cba0152201aff6a4d0dc407997d10a7f
|
https://github.com/juce-framework/JUCE
|
https://github.com/juce-framework/JUCE/commit/2e874e80cba0152201aff6a4d0dc407997d10a7f
|
ZipFile: Add path checks to uncompressEntry()
| 1
|
Result ZipFile::uncompressEntry (int index, const File& targetDirectory, bool shouldOverwriteFiles)
{
auto* zei = entries.getUnchecked (index);
#if JUCE_WINDOWS
auto entryPath = zei->entry.filename;
#else
auto entryPath = zei->entry.filename.replaceCharacter ('\\', '/');
#endif
if (entryPath.isEmpty())
return Result::ok();
auto targetFile = targetDirectory.getChildFile (entryPath);
if (entryPath.endsWithChar ('/') || entryPath.endsWithChar ('\\'))
return targetFile.createDirectory(); // (entry is a directory, not a file)
std::unique_ptr<InputStream> in (createStreamForEntry (index));
if (in == nullptr)
return Result::fail ("Failed to open the zip file for reading");
if (targetFile.exists())
{
if (! shouldOverwriteFiles)
return Result::ok();
if (! targetFile.deleteFile())
return Result::fail ("Failed to write to target file: " + targetFile.getFullPathName());
}
if (! targetFile.getParentDirectory().createDirectory())
return Result::fail ("Failed to create target folder: " + targetFile.getParentDirectory().getFullPathName());
if (zei->entry.isSymbolicLink)
{
String originalFilePath (in->readEntireStreamAsString()
.replaceCharacter (L'/', File::getSeparatorChar()));
if (! File::createSymbolicLink (targetFile, originalFilePath, true))
return Result::fail ("Failed to create symbolic link: " + originalFilePath);
}
else
{
FileOutputStream out (targetFile);
if (out.failedToOpen())
return Result::fail ("Failed to write to target file: " + targetFile.getFullPathName());
out << *in;
}
targetFile.setCreationTime (zei->entry.fileTime);
targetFile.setLastModificationTime (zei->entry.fileTime);
targetFile.setLastAccessTime (zei->entry.fileTime);
return Result::ok();
}
|
147242935972698231254697341250203186621
|
None
|
CWE-59
|
CVE-2021-23521
|
This affects the package juce-framework/JUCE before 6.1.5. This vulnerability is triggered when a malicious archive is crafted with an entry containing a symbolic link. When extracted, the symbolic link is followed outside of the target dir allowing writing arbitrary files on the target host. In some cases, this can allow an attacker to execute arbitrary code. The vulnerable code is in the ZipFile::uncompressEntry function in juce_ZipFile.cpp and is executed when the archive is extracted upon calling uncompressTo() on a ZipFile object.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-23521
|
|
521,488
|
JUCE
|
2e874e80cba0152201aff6a4d0dc407997d10a7f
|
https://github.com/juce-framework/JUCE
|
https://github.com/juce-framework/JUCE/commit/2e874e80cba0152201aff6a4d0dc407997d10a7f
|
ZipFile: Add path checks to uncompressEntry()
| 0
|
Result ZipFile::uncompressEntry (int index, const File& targetDirectory, OverwriteFiles overwriteFiles, FollowSymlinks followSymlinks)
{
auto* zei = entries.getUnchecked (index);
#if JUCE_WINDOWS
auto entryPath = zei->entry.filename;
#else
auto entryPath = zei->entry.filename.replaceCharacter ('\\', '/');
#endif
if (entryPath.isEmpty())
return Result::ok();
auto targetFile = targetDirectory.getChildFile (entryPath);
if (! targetFile.isAChildOf (targetDirectory))
return Result::fail ("Entry " + entryPath + " is outside the target directory");
if (entryPath.endsWithChar ('/') || entryPath.endsWithChar ('\\'))
return targetFile.createDirectory(); // (entry is a directory, not a file)
std::unique_ptr<InputStream> in (createStreamForEntry (index));
if (in == nullptr)
return Result::fail ("Failed to open the zip file for reading");
if (targetFile.exists())
{
if (overwriteFiles == OverwriteFiles::no)
return Result::ok();
if (! targetFile.deleteFile())
return Result::fail ("Failed to write to target file: " + targetFile.getFullPathName());
}
if (followSymlinks == FollowSymlinks::no && hasSymbolicPart (targetDirectory, targetFile.getParentDirectory()))
return Result::fail ("Parent directory leads through symlink for target file: " + targetFile.getFullPathName());
if (! targetFile.getParentDirectory().createDirectory())
return Result::fail ("Failed to create target folder: " + targetFile.getParentDirectory().getFullPathName());
if (zei->entry.isSymbolicLink)
{
String originalFilePath (in->readEntireStreamAsString()
.replaceCharacter (L'/', File::getSeparatorChar()));
if (! File::createSymbolicLink (targetFile, originalFilePath, true))
return Result::fail ("Failed to create symbolic link: " + originalFilePath);
}
else
{
FileOutputStream out (targetFile);
if (out.failedToOpen())
return Result::fail ("Failed to write to target file: " + targetFile.getFullPathName());
out << *in;
}
targetFile.setCreationTime (zei->entry.fileTime);
targetFile.setLastModificationTime (zei->entry.fileTime);
targetFile.setLastAccessTime (zei->entry.fileTime);
return Result::ok();
}
|
31848199774747663184028918368749536503
|
None
|
CWE-59
|
CVE-2021-23521
|
This affects the package juce-framework/JUCE before 6.1.5. This vulnerability is triggered when a malicious archive is crafted with an entry containing a symbolic link. When extracted, the symbolic link is followed outside of the target dir allowing writing arbitrary files on the target host. In some cases, this can allow an attacker to execute arbitrary code. The vulnerable code is in the ZipFile::uncompressEntry function in juce_ZipFile.cpp and is executed when the archive is extracted upon calling uncompressTo() on a ZipFile object.
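The decisive line in the fixed record is targetFile.isAChildOf (targetDirectory), which rejects entry names that escape the extraction root before anything is written. A C sketch of the same containment idea follows, using POSIX realpath() as a stand-in for JUCE's path handling (an assumption: realpath() requires the path to exist, so a real extractor would canonicalize the entry's parent directory instead).

#include <limits.h>
#include <stdlib.h>
#include <string.h>

/* Returns nonzero only if 'candidate' resolves to a location at or below
   'target_dir' once symlinks and ".." components are collapsed. */
static int stays_inside(const char *target_dir, const char *candidate)
{
    char dir_real[PATH_MAX], cand_real[PATH_MAX];
    if (!realpath(target_dir, dir_real) || !realpath(candidate, cand_real))
        return 0;   /* unresolved paths are rejected outright */
    size_t n = strlen(dir_real);
    return strncmp(cand_real, dir_real, n) == 0 &&
           (cand_real[n] == '/' || cand_real[n] == '\0');
}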
|
https://nvd.nist.gov/vuln/detail/CVE-2021-23521
|
|
217,547
|
libMeshb
|
8cd68c54e0647c0030ae4506a225ad4a2655c316
|
https://github.com/LoicMarechal/libMeshb
|
https://github.com/LoicMarechal/libMeshb/commit/8cd68c54e0647c0030ae4506a225ad4a2655c316
|
Removed a potential buffer overflow crash in GmfOpenMesh and debugged the test_libmeshb_block_pipeline.f that crashed
| 1
|
int64_t GmfOpenMesh(const char *FilNam, int mod, ...)
{
int KwdCod, res, *PtrVer, *PtrDim, err;
int64_t MshIdx;
char str[ GmfStrSiz ];
va_list VarArg;
GmfMshSct *msh;
/*---------------------*/
/* MESH STRUCTURE INIT */
/*---------------------*/
if(!(msh = calloc(1, sizeof(GmfMshSct))))
return(0);
MshIdx = (int64_t)msh;
// Save the current stack environment for longjmp
if( (err = setjmp(msh->err)) != 0)
{
#ifdef GMFDEBUG
printf("libMeshb : mesh %p : error %d\n", msh, err);
#endif
if(msh->hdl != NULL)
fclose(msh->hdl);
if(msh->FilDes != 0)
#ifdef GMF_WINDOWS
_close(msh->FilDes);
#else
close(msh->FilDes);
#endif
free(msh);
return(0);
}
// Copy the FilNam into the structure
if(strlen(FilNam) + 7 >= GmfStrSiz)
longjmp(msh->err, -4);
strcpy(msh->FilNam, FilNam);
// Store the opening mod (read or write) and guess
// the filetype (binary or ascii) depending on the extension
msh->mod = mod;
msh->buf = (void *)msh->DblBuf;
msh->FltBuf = (void *)msh->DblBuf;
msh->IntBuf = (void *)msh->DblBuf;
if(strstr(msh->FilNam, ".meshb"))
msh->typ |= (Bin | MshFil);
else if(strstr(msh->FilNam, ".mesh"))
msh->typ |= (Asc | MshFil);
else if(strstr(msh->FilNam, ".solb"))
msh->typ |= (Bin | SolFil);
else if(strstr(msh->FilNam, ".sol"))
msh->typ |= (Asc | SolFil);
else
longjmp(msh->err, -5);
// Open the file in the required mod and initialize the mesh structure
if(msh->mod == GmfRead)
{
/*-----------------------*/
/* OPEN FILE FOR READING */
/*-----------------------*/
va_start(VarArg, mod);
PtrVer = va_arg(VarArg, int *);
PtrDim = va_arg(VarArg, int *);
va_end(VarArg);
// Read the endian coding tag, the mesh version
// and the mesh dimension (mandatory kwd)
if(msh->typ & Bin)
{
// Create the name string and open the file
#ifdef WITH_GMF_AIO
// [Bruno] added binary flag (necessary under Windows)
msh->FilDes = open(msh->FilNam, OPEN_READ_FLAGS, OPEN_READ_MODE);
if(msh->FilDes <= 0)
longjmp(msh->err, -6);
// Read the endian coding tag
if(read(msh->FilDes, &msh->cod, WrdSiz) != WrdSiz)
longjmp(msh->err, -7);
#else
// [Bruno] added binary flag (necessary under Windows)
if(!(msh->hdl = fopen(msh->FilNam, "rb")))
longjmp(msh->err, -8);
// Read the endian coding tag
safe_fread(&msh->cod, WrdSiz, 1, msh->hdl, msh->err);
#endif
// Read the mesh version and the mesh dimension (mandatory kwd)
if( (msh->cod != 1) && (msh->cod != 16777216) )
longjmp(msh->err, -9);
ScaWrd(msh, (unsigned char *)&msh->ver);
if( (msh->ver < 1) || (msh->ver > 4) )
longjmp(msh->err, -10);
if( (msh->ver >= 3) && (sizeof(int64_t) != 8) )
longjmp(msh->err, -11);
ScaWrd(msh, (unsigned char *)&KwdCod);
if(KwdCod != GmfDimension)
longjmp(msh->err, -12);
GetPos(msh);
ScaWrd(msh, (unsigned char *)&msh->dim);
}
else
{
// Create the name string and open the file
if(!(msh->hdl = fopen(msh->FilNam, "rb")))
longjmp(msh->err, -13);
do
{
res = fscanf(msh->hdl, "%s", str);
}while( (res != EOF) && strcmp(str, "MeshVersionFormatted") );
if(res == EOF)
longjmp(msh->err, -14);
safe_fscanf(msh->hdl, "%d", &msh->ver, msh->err);
if( (msh->ver < 1) || (msh->ver > 4) )
longjmp(msh->err, -15);
do
{
res = fscanf(msh->hdl, "%s", str);
}while( (res != EOF) && strcmp(str, "Dimension") );
if(res == EOF)
longjmp(msh->err, -16);
safe_fscanf(msh->hdl, "%d", &msh->dim, msh->err);
}
if( (msh->dim != 2) && (msh->dim != 3) )
longjmp(msh->err, -17);
(*PtrVer) = msh->ver;
(*PtrDim) = msh->dim;
// Set default real numbers size
if(msh->ver == 1)
msh->FltSiz = 32;
else
msh->FltSiz = 64;
/*------------*/
/* KW READING */
/*------------*/
// Read the list of kw present in the file
if(!ScaKwdTab(msh))
return(0);
return(MshIdx);
}
else if(msh->mod == GmfWrite)
{
/*-----------------------*/
/* OPEN FILE FOR WRITING */
/*-----------------------*/
msh->cod = 1;
// Check if the user provided a valid version number and dimension
va_start(VarArg, mod);
msh->ver = va_arg(VarArg, int);
msh->dim = va_arg(VarArg, int);
va_end(VarArg);
if( (msh->ver < 1) || (msh->ver > 4) )
longjmp(msh->err, -18);
if( (msh->ver >= 3) && (sizeof(int64_t) != 8) )
longjmp(msh->err, -19);
if( (msh->dim != 2) && (msh->dim != 3) )
longjmp(msh->err, -20);
// Set default real numbers size
if(msh->ver == 1)
msh->FltSiz = 32;
else
msh->FltSiz = 64;
// Create the mesh file
if(msh->typ & Bin)
{
/*
* [Bruno] replaced previous call to creat():
* with a call to open(), because Windows needs the
* binary flag to be specified.
*/
#ifdef WITH_GMF_AIO
msh->FilDes = open(msh->FilNam, OPEN_WRITE_FLAGS, OPEN_WRITE_MODE);
if(msh->FilDes <= 0)
longjmp(msh->err, -21);
#else
if(!(msh->hdl = fopen(msh->FilNam, "wb")))
longjmp(msh->err, -22);
#endif
}
else if(!(msh->hdl = fopen(msh->FilNam, "wb")))
longjmp(msh->err, -23);
/*------------*/
/* KW WRITING */
/*------------*/
// Write the mesh version and dimension
if(msh->typ & Asc)
{
fprintf(msh->hdl, "%s %d\n\n",
GmfKwdFmt[ GmfVersionFormatted ][0], msh->ver);
fprintf(msh->hdl, "%s %d\n",
GmfKwdFmt[ GmfDimension ][0], msh->dim);
}
else
{
RecWrd(msh, (unsigned char *)&msh->cod);
RecWrd(msh, (unsigned char *)&msh->ver);
GmfSetKwd(MshIdx, GmfDimension, 0);
RecWrd(msh, (unsigned char *)&msh->dim);
}
return(MshIdx);
}
else
{
free(msh);
return(0);
}
}
|
15680638155848863443525622432898430696
|
None
|
CWE-120
|
CVE-2021-46225
|
A buffer overflow in the GmfOpenMesh() function of libMeshb v7.61 allows attackers to cause a Denial of Service (DoS) via a crafted MESH file.
|
https://nvd.nist.gov/vuln/detail/CVE-2021-46225
|
|
522,329
|
libMeshb
|
8cd68c54e0647c0030ae4506a225ad4a2655c316
|
https://github.com/LoicMarechal/libMeshb
|
https://github.com/LoicMarechal/libMeshb/commit/8cd68c54e0647c0030ae4506a225ad4a2655c316
|
Removed a potential buffer overflow crash in GmfOpenMesh and debugged the test_libmeshb_block_pipeline.f that crashed
| 0
|
int64_t GmfOpenMesh(const char *FilNam, int mod, ...)
{
int KwdCod, res, *PtrVer, *PtrDim, err;
int64_t MshIdx;
char str[ GmfStrSiz ];
va_list VarArg;
GmfMshSct *msh;
/*---------------------*/
/* MESH STRUCTURE INIT */
/*---------------------*/
if(!(msh = calloc(1, sizeof(GmfMshSct))))
return(0);
MshIdx = (int64_t)msh;
// Save the current stack environment for longjmp
if( (err = setjmp(msh->err)) != 0)
{
#ifdef GMFDEBUG
printf("libMeshb : mesh %p : error %d\n", msh, err);
#endif
if(msh->hdl != NULL)
fclose(msh->hdl);
if(msh->FilDes != 0)
#ifdef GMF_WINDOWS
_close(msh->FilDes);
#else
close(msh->FilDes);
#endif
free(msh);
return(0);
}
// Copy the FilNam into the structure
if(strlen(FilNam) + 7 >= GmfStrSiz)
longjmp(msh->err, -4);
strcpy(msh->FilNam, FilNam);
// Store the opening mod (read or write) and guess
// the filetype (binary or ascii) depending on the extension
msh->mod = mod;
msh->buf = (void *)msh->DblBuf;
msh->FltBuf = (void *)msh->DblBuf;
msh->IntBuf = (void *)msh->DblBuf;
if(strstr(msh->FilNam, ".meshb"))
msh->typ |= (Bin | MshFil);
else if(strstr(msh->FilNam, ".mesh"))
msh->typ |= (Asc | MshFil);
else if(strstr(msh->FilNam, ".solb"))
msh->typ |= (Bin | SolFil);
else if(strstr(msh->FilNam, ".sol"))
msh->typ |= (Asc | SolFil);
else
longjmp(msh->err, -5);
// Open the file in the required mod and initialize the mesh structure
if(msh->mod == GmfRead)
{
/*-----------------------*/
/* OPEN FILE FOR READING */
/*-----------------------*/
va_start(VarArg, mod);
PtrVer = va_arg(VarArg, int *);
PtrDim = va_arg(VarArg, int *);
va_end(VarArg);
// Read the endian coding tag, the mesh version
// and the mesh dimension (mandatory kwd)
if(msh->typ & Bin)
{
// Create the name string and open the file
#ifdef WITH_GMF_AIO
// [Bruno] added binary flag (necessary under Windows)
msh->FilDes = open(msh->FilNam, OPEN_READ_FLAGS, OPEN_READ_MODE);
if(msh->FilDes <= 0)
longjmp(msh->err, -6);
// Read the endian coding tag
if(read(msh->FilDes, &msh->cod, WrdSiz) != WrdSiz)
longjmp(msh->err, -7);
#else
// [Bruno] added binary flag (necessary under Windows)
if(!(msh->hdl = fopen(msh->FilNam, "rb")))
longjmp(msh->err, -8);
// Read the endian coding tag
safe_fread(&msh->cod, WrdSiz, 1, msh->hdl, msh->err);
#endif
// Read the mesh version and the mesh dimension (mandatory kwd)
if( (msh->cod != 1) && (msh->cod != 16777216) )
longjmp(msh->err, -9);
ScaWrd(msh, (unsigned char *)&msh->ver);
if( (msh->ver < 1) || (msh->ver > 4) )
longjmp(msh->err, -10);
if( (msh->ver >= 3) && (sizeof(int64_t) != 8) )
longjmp(msh->err, -11);
ScaWrd(msh, (unsigned char *)&KwdCod);
if(KwdCod != GmfDimension)
longjmp(msh->err, -12);
GetPos(msh);
ScaWrd(msh, (unsigned char *)&msh->dim);
}
else
{
// Create the name string and open the file
if(!(msh->hdl = fopen(msh->FilNam, "rb")))
longjmp(msh->err, -13);
do
{
res = fscanf(msh->hdl, "%100s", str);
}while( (res != EOF) && strcmp(str, "MeshVersionFormatted") );
if(res == EOF)
longjmp(msh->err, -14);
safe_fscanf(msh->hdl, "%d", &msh->ver, msh->err);
if( (msh->ver < 1) || (msh->ver > 4) )
longjmp(msh->err, -15);
do
{
res = fscanf(msh->hdl, "%100s", str);
}while( (res != EOF) && strcmp(str, "Dimension") );
if(res == EOF)
longjmp(msh->err, -16);
safe_fscanf(msh->hdl, "%d", &msh->dim, msh->err);
}
if( (msh->dim != 2) && (msh->dim != 3) )
longjmp(msh->err, -17);
(*PtrVer) = msh->ver;
(*PtrDim) = msh->dim;
// Set default real numbers size
if(msh->ver == 1)
msh->FltSiz = 32;
else
msh->FltSiz = 64;
/*------------*/
/* KW READING */
/*------------*/
// Read the list of kw present in the file
if(!ScaKwdTab(msh))
return(0);
return(MshIdx);
}
else if(msh->mod == GmfWrite)
{
/*-----------------------*/
/* OPEN FILE FOR WRITING */
/*-----------------------*/
msh->cod = 1;
// Check if the user provided a valid version number and dimension
va_start(VarArg, mod);
msh->ver = va_arg(VarArg, int);
msh->dim = va_arg(VarArg, int);
va_end(VarArg);
if( (msh->ver < 1) || (msh->ver > 4) )
longjmp(msh->err, -18);
if( (msh->ver >= 3) && (sizeof(int64_t) != 8) )
longjmp(msh->err, -19);
if( (msh->dim != 2) && (msh->dim != 3) )
longjmp(msh->err, -20);
// Set default real numbers size
if(msh->ver == 1)
msh->FltSiz = 32;
else
msh->FltSiz = 64;
// Create the mesh file
if(msh->typ & Bin)
{
/*
* [Bruno] replaced previous call to creat():
* with a call to open(), because Windows needs the
* binary flag to be specified.
*/
#ifdef WITH_GMF_AIO
msh->FilDes = open(msh->FilNam, OPEN_WRITE_FLAGS, OPEN_WRITE_MODE);
if(msh->FilDes <= 0)
longjmp(msh->err, -21);
#else
if(!(msh->hdl = fopen(msh->FilNam, "wb")))
longjmp(msh->err, -22);
#endif
}
else if(!(msh->hdl = fopen(msh->FilNam, "wb")))
longjmp(msh->err, -23);
/*------------*/
/* KW WRITING */
/*------------*/
// Write the mesh version and dimension
if(msh->typ & Asc)
{
fprintf(msh->hdl, "%s %d\n\n",
GmfKwdFmt[ GmfVersionFormatted ][0], msh->ver);
fprintf(msh->hdl, "%s %d\n",
GmfKwdFmt[ GmfDimension ][0], msh->dim);
}
else
{
RecWrd(msh, (unsigned char *)&msh->cod);
RecWrd(msh, (unsigned char *)&msh->ver);
GmfSetKwd(MshIdx, GmfDimension, 0);
RecWrd(msh, (unsigned char *)&msh->dim);
}
return(MshIdx);
}
else
{
free(msh);
return(0);
}
}
|
94702895651186711005755032541348695201
|
None
|
CWE-120
|
CVE-2021-46225
|
A buffer overflow in the GmfOpenMesh() function of libMeshb v7.61 allows attackers to cause a Denial of Service (DoS) via a crafted MESH file.
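The entire fix is the field width added to the two keyword scans: fscanf(msh->hdl, "%s", str) becomes fscanf(msh->hdl, "%100s", str), so a crafted keyword in the .mesh file can no longer write past the str[GmfStrSiz] buffer. A standalone illustration of the difference, assuming (as the fixed code does) that the destination buffer is larger than the width:

#include <stdio.h>

/* 'buf' must hold at least 101 bytes: 100 characters plus the NUL. */
static int read_keyword(FILE *f, char *buf)
{
    /* Unbounded and unsafe -- writes as much as the input supplies:
       return fscanf(f, "%s", buf); */
    return fscanf(f, "%100s", buf);   /* bounded: at most 100 chars + NUL */
}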
|
https://nvd.nist.gov/vuln/detail/CVE-2021-46225
|
|
217,551
|
elfspirit
|
c5b0f5a9a24f2451bbeda4751d67633bc375e608
|
https://github.com/liyansong2018/elfspirit
|
https://github.com/liyansong2018/elfspirit/commit/c5b0f5a9a24f2451bbeda4751d67633bc375e608
|
Fix #1 about out-of-bounds
| 1
|
int parse(char *elf) {
int fd;
struct stat st;
uint8_t *elf_map;
int count;
char *tmp;
char *name;
char flag[4];
MODE = get_elf_class(elf);
fd = open(elf, O_RDONLY);
if (fd < 0) {
perror("open");
return -1;
}
if (fstat(fd, &st) < 0) {
perror("fstat");
return -1;
}
elf_map = mmap(0, st.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (elf_map == MAP_FAILED) {
perror("mmap");
return -1;
}
/* 32bit */
if (MODE == ELFCLASS32) {
/* ELF Header Information */
Elf32_Ehdr *ehdr;
ehdr = (Elf32_Ehdr *)elf_map;
INFO("ELF Header\n");
switch (ehdr->e_type) {
case ET_NONE:
tmp = "An unknown type";
break;
case ET_REL:
tmp = "A relocatable file";
break;
case ET_EXEC:
tmp = "An executable file";
break;
case ET_DYN:
tmp = "A shared object";
break;
case ET_CORE:
tmp = "A core file";
break;
default:
tmp = "An unknown type";
break;
}
PRINT_HEADER_EXP("e_type:", ehdr->e_type, tmp);
switch (ehdr->e_machine) {
case EM_NONE:
tmp = "An unknown machine";
break;
case EM_M32:
tmp = "AT&T WE 32100";
break;
case EM_SPARC:
tmp = "Sun Microsystems SPARC";
break;
case EM_386:
tmp = "Intel 80386";
break;
case EM_68K:
tmp = "Motorola 68000";
break;
case EM_88K:
tmp = "Motorola 88000";
break;
case EM_860:
tmp = "Intel 80860";
break;
case EM_MIPS:
tmp = "MIPS RS3000 (big-endian only)";
break;
case EM_PARISC:
tmp = "HP/PA";
break;
case EM_SPARC32PLUS:
tmp = "SPARC with enhanced instruction set";
break;
case EM_PPC:
tmp = "PowerPC";
break;
case EM_PPC64:
tmp = "PowerPC 64-bit";
break;
case EM_S390:
tmp = "IBM S/390";
break;
case EM_ARM:
tmp = "Advanced RISC Machines";
break;
case EM_SH:
tmp = "Renesas SuperH";
break;
case EM_SPARCV9:
tmp = "SPARC v9 64-bit";
break;
case EM_IA_64:
tmp = "Intel Itanium";
break;
case EM_X86_64:
tmp = "AMD x86-64";
break;
case EM_VAX:
tmp = "DEC Vax";
break;
default:
tmp = "An unknown machine";
break;
}
PRINT_HEADER_EXP("e_machine:", ehdr->e_machine, tmp);
switch (ehdr->e_version) {
case EV_NONE:
tmp = "Invalid version";
break;
case EV_CURRENT:
tmp = "Current version";
break;
default:
tmp = "Known version";
break;
}
PRINT_HEADER_EXP("e_version:", ehdr->e_version, tmp);
PRINT_HEADER("e_entry:", ehdr->e_entry);
PRINT_HEADER("e_phoff:", ehdr->e_phoff);
PRINT_HEADER("e_shoff:", ehdr->e_shoff);
PRINT_HEADER("e_flags:", ehdr->e_flags);
PRINT_HEADER("e_ehsize:", ehdr->e_ehsize);
PRINT_HEADER("e_phentsize:", ehdr->e_phentsize);
PRINT_HEADER("e_phnum:", ehdr->e_phnum);
PRINT_HEADER("e_shentsize:", ehdr->e_shentsize);
PRINT_HEADER("e_shnum:", ehdr->e_shnum);
PRINT_HEADER("e_shstrndx:", ehdr->e_shstrndx);
/* Section Information */
Elf32_Shdr *shdr;
Elf32_Phdr *phdr;
Elf32_Shdr shstrtab;
shdr = (Elf32_Shdr *)&elf_map[ehdr->e_shoff];
phdr = (Elf32_Phdr *)&elf_map[ehdr->e_phoff];
shstrtab = shdr[ehdr->e_shstrndx];
INFO("Section Header Table\n");
PRINT_SECTION_TITLE("Nr", "Name", "Type", "Addr", "Off", "Size", "Es", "Flg", "Lk", "Inf", "Al");
for (int i = 0; i < ehdr->e_shnum; i++) {
name = elf_map + shstrtab.sh_offset + shdr[i].sh_name;
switch (shdr[i].sh_type) {
case SHT_NULL:
tmp = "SHT_NULL";
break;
case SHT_PROGBITS:
tmp = "SHT_PROGBITS";
break;
case SHT_SYMTAB:
tmp = "SHT_SYMTAB";
break;
case SHT_STRTAB:
tmp = "SHT_STRTAB";
break;
case SHT_RELA:
tmp = "SHT_RELA";
break;
case SHT_HASH:
tmp = "SHT_HASH";
break;
case SHT_DYNAMIC:
tmp = "SHT_DYNAMIC";
break;
case SHT_NOTE:
tmp = "SHT_NOTE";
break;
case SHT_NOBITS:
tmp = "SHT_NOBITS";
break;
case SHT_REL:
tmp = "SHT_REL";
break;
case SHT_SHLIB:
tmp = "SHT_SHLIB";
break;
case SHT_DYNSYM:
tmp = "SHT_DYNSYM";
break;
case SHT_LOPROC:
tmp = "SHT_LOPROC";
break;
case SHT_HIPROC:
tmp = "SHT_HIPROC";
break;
case SHT_LOUSER:
tmp = "SHT_LOUSER";
break;
case SHT_HIUSER:
tmp = "SHT_HIUSER";
break;
default:
break;
}
if (strlen(name) > 15) {
strcpy(&name[15 - 6], "[...]");
}
strcpy(flag, " ");
flag2str_sh(shdr[i].sh_flags, flag);
PRINT_SECTION(i, name, tmp, shdr[i].sh_addr, shdr[i].sh_offset, shdr[i].sh_size, shdr[i].sh_entsize, \
flag, shdr[i].sh_link, shdr[i].sh_info, shdr[i].sh_addralign);
}
INFO("Program Header Table\n");
PRINT_PROGRAM_TITLE("Nr", "Type", "Offset", "Virtaddr", "Physaddr", "Filesiz", "Memsiz", "Flg", "Align");
for (int i = 0; i < ehdr->e_phnum; i++) {
switch (phdr[i].p_type) {
case PT_NULL:
tmp = "PT_NULL";
break;
case PT_LOAD:
tmp = "PT_LOAD";
break;
case PT_DYNAMIC:
tmp = "PT_DYNAMIC";
break;
case PT_INTERP:
tmp = "PT_INTERP";
break;
case PT_NOTE:
tmp = "PT_NOTE";
break;
case PT_SHLIB:
tmp = "PT_SHLIB";
break;
case PT_PHDR:
tmp = "PT_PHDR";
break;
case PT_LOPROC:
tmp = "PT_LOPROC";
break;
case PT_HIPROC:
tmp = "PT_HIPROC";
break;
case PT_GNU_STACK:
tmp = "PT_GNU_STACK";
break;
default:
break;
}
strcpy(flag, " ");
flag2str(phdr[i].p_flags, flag);
PRINT_PROGRAM(i, tmp, phdr[i].p_offset, phdr[i].p_vaddr, phdr[i].p_paddr, phdr[i].p_filesz, phdr[i].p_memsz, flag, phdr[i].p_align);
}
INFO("Section to segment mapping\n");
for (int i = 0; i < ehdr->e_phnum; i++) {
printf(" [%2d]", i);
for (int j = 0; j < ehdr->e_shnum; j++) {
name = elf_map + shstrtab.sh_offset + shdr[j].sh_name;
if (shdr[j].sh_addr >= phdr[i].p_vaddr && shdr[j].sh_addr + shdr[j].sh_size <= phdr[i].p_vaddr + phdr[i].p_memsz && shdr[j].sh_type != SHT_NULL) {
if (shdr[j].sh_flags >> 1 & 0x1) {
printf(" %s", name);
}
}
}
printf("\n");
}
INFO("Dynamic link information\n");
int dynstr;
int dynamic;
Elf32_Dyn *dyn;
for (int i = 0; i < ehdr->e_shnum; i++) {
name = elf_map + shstrtab.sh_offset + shdr[i].sh_name;
if (!strcmp(name, ".dynstr")) {
dynstr = i;
}
if (!strcmp(name, ".dynamic")) {
dynamic = i;
}
}
char value[50];
name = "";
dyn = (Elf32_Dyn *)&elf_map[shdr[dynamic].sh_offset];
count = shdr[dynamic].sh_size / sizeof(Elf32_Dyn);
INFO("Dynamic section at offset 0x%x contains %d entries\n", shdr[dynamic].sh_offset, count);
PRINT_DYN_TITLE("Tag", "Type", "Name/Value");
for(int i = 0; i < count; i++) {
tmp = "";
memset(value, 0, 50);
snprintf(value, 50, "0x%x", dyn[i].d_un.d_val);
switch (dyn[i].d_tag) {
/* Legal values for d_tag (dynamic entry type). */
case DT_NULL:
tmp = "DT_NULL";
break;
case DT_NEEDED:
tmp = "DT_NEEDED";
name = elf_map + shdr[dynstr].sh_offset + dyn[i].d_un.d_val;
snprintf(value, 50, "Shared library: [%s]", name);
break;
case DT_PLTRELSZ:
tmp = "DT_PLTRELSZ";
break;
case DT_PLTGOT:
tmp = "DT_PLTGOT";
break;
case DT_HASH:
tmp = "DT_HASH";
break;
case DT_STRTAB:
tmp = "DT_STRTAB";
break;
case DT_SYMTAB:
tmp = "DT_SYMTAB";
break;
case DT_RELA:
tmp = "DT_RELA";
break;
case DT_RELASZ:
tmp = "DT_RELASZ";
break;
case DT_RELAENT:
tmp = "DT_RELAENT";
break;
case DT_STRSZ:
tmp = "DT_STRSZ";
break;
case DT_SYMENT:
tmp = "DT_SYMENT";
break;
case DT_INIT:
tmp = "DT_INIT";
break;
case DT_FINI:
tmp = "DT_FINI";
break;
case DT_SONAME:
tmp = "DT_SONAME";
break;
case DT_RPATH:
tmp = "DT_RPATH";
break;
case DT_SYMBOLIC:
tmp = "DT_SYMBOLIC";
break;
case DT_REL:
tmp = "DT_REL";
break;
case DT_RELSZ:
tmp = "DT_RELSZ";
break;
case DT_RELENT:
tmp = "DT_RELENT";
break;
case DT_PLTREL:
tmp = "DT_PLTREL";
break;
case DT_DEBUG:
tmp = "DT_DEBUG";
break;
case DT_TEXTREL:
tmp = "DT_TEXTREL";
break;
case DT_JMPREL:
tmp = "DT_JMPREL";
break;
case DT_BIND_NOW:
tmp = "DT_BIND_NOW";
break;
case DT_INIT_ARRAY:
tmp = "DT_INIT_ARRAY";
break;
case DT_FINI_ARRAY:
tmp = "DT_FINI_ARRAY";
break;
case DT_INIT_ARRAYSZ:
tmp = "DT_INIT_ARRAYSZ";
break;
case DT_FINI_ARRAYSZ:
tmp = "DT_FINI_ARRAYSZ";
break;
case DT_RUNPATH:
tmp = "DT_RUNPATH";
break;
case DT_FLAGS:
tmp = "DT_FLAGS";
snprintf(value, 50, "Flags: %d", dyn[i].d_un.d_val);
break;
case DT_ENCODING:
tmp = "DT_ENCODING";
break;
case DT_PREINIT_ARRAYSZ:
tmp = "DT_PREINIT_ARRAYSZ";
break;
case DT_SYMTAB_SHNDX:
tmp = "DT_SYMTAB_SHNDX";
break;
case DT_NUM:
tmp = "DT_NUM";
break;
case DT_LOOS:
tmp = "DT_LOOS";
break;
case DT_HIOS:
tmp = "DT_HIOS";
break;
case DT_LOPROC:
tmp = "DT_LOPROC";
break;
case DT_HIPROC:
tmp = "DT_HIPROC";
break;
case DT_PROCNUM:
tmp = "DT_LOPROC";
break;
/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
* Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's
* approach. */
case DT_VALRNGLO:
tmp = "DT_VALRNGLO";
break;
case DT_GNU_PRELINKED:
tmp = "DT_GNU_PRELINKED";
break;
case DT_GNU_CONFLICTSZ:
tmp = "DT_GNU_CONFLICTSZ";
break;
case DT_GNU_LIBLISTSZ:
tmp = "DT_GNU_LIBLISTSZ";
break;
case DT_CHECKSUM:
tmp = "DT_CHECKSUM";
break;
case DT_PLTPADSZ:
tmp = "DT_PLTPADSZ";
break;
case DT_MOVEENT:
tmp = "DT_MOVEENT";
break;
case DT_MOVESZ:
tmp = "DT_MOVESZ";
break;
case DT_FEATURE_1:
tmp = "DT_FEATURE_1";
break;
case DT_POSFLAG_1:
tmp = "DT_POSFLAG_1";
break;
case DT_SYMINSZ:
tmp = "DT_SYMINSZ";
break;
case DT_SYMINENT:
tmp = "DT_SYMINENT";
break;
/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
* Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
* If any adjustment is made to the ELF object after it has been
* built these entries will need to be adjusted. */
case DT_ADDRRNGLO:
tmp = "DT_ADDRRNGLO";
break;
case DT_GNU_HASH:
tmp = "DT_GNU_HASH";
break;
case DT_TLSDESC_PLT:
tmp = "DT_TLSDESC_PLT";
break;
case DT_TLSDESC_GOT:
tmp = "DT_TLSDESC_GOT";
break;
case DT_GNU_CONFLICT:
tmp = "DT_GNU_CONFLICT";
break;
case DT_GNU_LIBLIST:
tmp = "DT_GNU_LIBLIST";
break;
case DT_CONFIG:
tmp = "DT_CONFIG";
break;
case DT_DEPAUDIT:
tmp = "DT_DEPAUDIT";
break;
case DT_AUDIT:
tmp = "DT_AUDIT";
break;
case DT_PLTPAD:
tmp = "DT_PLTPAD";
break;
case DT_MOVETAB:
tmp = "DT_MOVETAB";
break;
case DT_SYMINFO:
tmp = "DT_SYMINFO";
break;
/* The versioning entry types. The next are defined as part of the
* GNU extension. */
case DT_VERSYM:
tmp = "DT_VERSYM";
break;
case DT_RELACOUNT:
tmp = "DT_RELACOUNT";
break;
case DT_RELCOUNT:
tmp = "DT_RELCOUNT";
break;
/* These were chosen by Sun. */
case DT_FLAGS_1:
tmp = "DT_FLAGS_1";
switch (dyn[i].d_un.d_val) {
case DF_1_PIE:
snprintf(value, 50, "Flags: %s", "PIE");
break;
default:
snprintf(value, 50, "Flags: %d", dyn[i].d_un.d_val);
break;
}
break;
case DT_VERDEF:
tmp = "DT_VERDEF";
break;
case DT_VERDEFNUM:
tmp = "DT_VERDEFNUM";
break;
case DT_VERNEED:
tmp = "DT_VERNEED";
break;
case DT_VERNEEDNUM:
tmp = "DT_VERNEEDNUM";
break;
default:
break;
}
PRINT_DYN(dyn[i].d_tag, tmp, value);
}
}
/* 64bit */
if (MODE == ELFCLASS64) {
/* ELF Header Information */
Elf64_Ehdr *ehdr;
ehdr = (Elf64_Ehdr *)elf_map;
INFO("ELF Header\n");
switch (ehdr->e_type) {
case ET_NONE:
tmp = "An unknown type";
break;
case ET_REL:
tmp = "A relocatable file";
break;
case ET_EXEC:
tmp = "An executable file";
break;
case ET_DYN:
tmp = "A shared object";
break;
case ET_CORE:
tmp = "A core file";
break;
default:
tmp = "An unknown type";
break;
}
PRINT_HEADER_EXP("e_type:", ehdr->e_type, tmp);
switch (ehdr->e_machine) {
case EM_NONE:
tmp = "An unknown machine";
break;
case EM_M32:
tmp = "AT&T WE 32100";
break;
case EM_SPARC:
tmp = "Sun Microsystems SPARC";
break;
case EM_386:
tmp = "Intel 80386";
break;
case EM_68K:
tmp = "Motorola 68000";
break;
case EM_88K:
tmp = "Motorola 88000";
break;
case EM_860:
tmp = "Intel 80860";
break;
case EM_MIPS:
tmp = "MIPS RS3000 (big-endian only)";
break;
case EM_PARISC:
tmp = "HP/PA";
break;
case EM_SPARC32PLUS:
tmp = "SPARC with enhanced instruction set";
break;
case EM_PPC:
tmp = "PowerPC";
break;
case EM_PPC64:
tmp = "PowerPC 64-bit";
break;
case EM_S390:
tmp = "IBM S/390";
break;
case EM_ARM:
tmp = "Advanced RISC Machines";
break;
case EM_SH:
tmp = "Renesas SuperH";
break;
case EM_SPARCV9:
tmp = "SPARC v9 64-bit";
break;
case EM_IA_64:
tmp = "Intel Itanium";
break;
case EM_X86_64:
tmp = "AMD x86-64";
break;
case EM_VAX:
tmp = "DEC Vax";
break;
default:
tmp = "An unknown machine";
break;
}
PRINT_HEADER_EXP("e_machine:", ehdr->e_machine, tmp);
switch (ehdr->e_version) {
case EV_NONE:
tmp = "Invalid version";
break;
case EV_CURRENT:
tmp = "Current version";
break;
default:
tmp = "Known version";
break;
}
PRINT_HEADER_EXP("e_version:", ehdr->e_version, tmp);
PRINT_HEADER("e_entry:", ehdr->e_entry);
PRINT_HEADER("e_phoff:", ehdr->e_phoff);
PRINT_HEADER("e_shoff:", ehdr->e_shoff);
PRINT_HEADER("e_flags:", ehdr->e_flags);
PRINT_HEADER("e_ehsize:", ehdr->e_ehsize);
PRINT_HEADER("e_phentsize:", ehdr->e_phentsize);
PRINT_HEADER("e_phnum:", ehdr->e_phnum);
PRINT_HEADER("e_shentsize:", ehdr->e_shentsize);
PRINT_HEADER("e_shnum:", ehdr->e_shnum);
PRINT_HEADER("e_shstrndx:", ehdr->e_shstrndx);
/* Section Information */
Elf64_Shdr *shdr;
Elf64_Phdr *phdr;
Elf64_Shdr shstrtab;
shdr = (Elf64_Shdr *)&elf_map[ehdr->e_shoff];
phdr = (Elf64_Phdr *)&elf_map[ehdr->e_phoff];
shstrtab = shdr[ehdr->e_shstrndx];
INFO("Section Header Table\n");
PRINT_SECTION_TITLE("Nr", "Name", "Type", "Addr", "Off", "Size", "Es", "Flg", "Lk", "Inf", "Al");
for (int i = 0; i < ehdr->e_shnum; i++) {
name = elf_map + shstrtab.sh_offset + shdr[i].sh_name;
switch (shdr[i].sh_type) {
case SHT_NULL:
tmp = "SHT_NULL";
break;
case SHT_PROGBITS:
tmp = "SHT_PROGBITS";
break;
case SHT_SYMTAB:
tmp = "SHT_SYMTAB";
break;
case SHT_STRTAB:
tmp = "SHT_STRTAB";
break;
case SHT_RELA:
tmp = "SHT_RELA";
break;
case SHT_HASH:
tmp = "SHT_HASH";
break;
case SHT_DYNAMIC:
tmp = "SHT_DYNAMIC";
break;
case SHT_NOTE:
tmp = "SHT_NOTE";
break;
case SHT_NOBITS:
tmp = "SHT_NOBITS";
break;
case SHT_REL:
tmp = "SHT_REL";
break;
case SHT_SHLIB:
tmp = "SHT_SHLIB";
break;
case SHT_DYNSYM:
tmp = "SHT_DYNSYM";
break;
case SHT_LOPROC:
tmp = "SHT_LOPROC";
break;
case SHT_HIPROC:
tmp = "SHT_HIPROC";
break;
case SHT_LOUSER:
tmp = "SHT_LOUSER";
break;
case SHT_HIUSER:
tmp = "SHT_HIUSER";
break;
default:
break;
}
if (strlen(name) > 15) {
strcpy(&name[15 - 6], "[...]");
}
strcpy(flag, " ");
flag2str_sh(shdr[i].sh_flags, flag);
PRINT_SECTION(i, name, tmp, shdr[i].sh_addr, shdr[i].sh_offset, shdr[i].sh_size, shdr[i].sh_entsize, \
flag, shdr[i].sh_link, shdr[i].sh_info, shdr[i].sh_addralign);
}
INFO("Program Header Table\n");
PRINT_PROGRAM_TITLE("Nr", "Type", "Offset", "Virtaddr", "Physaddr", "Filesiz", "Memsiz", "Flg", "Align");
for (int i = 0; i < ehdr->e_phnum; i++) {
switch (phdr[i].p_type) {
case PT_NULL:
tmp = "PT_NULL";
break;
case PT_LOAD:
tmp = "PT_LOAD";
break;
case PT_DYNAMIC:
tmp = "PT_DYNAMIC";
break;
case PT_INTERP:
tmp = "PT_INTERP";
break;
case PT_NOTE:
tmp = "PT_NOTE";
break;
case PT_SHLIB:
tmp = "PT_SHLIB";
break;
case PT_PHDR:
tmp = "PT_PHDR";
break;
case PT_LOPROC:
tmp = "PT_LOPROC";
break;
case PT_HIPROC:
tmp = "PT_HIPROC";
break;
case PT_GNU_STACK:
tmp = "PT_GNU_STACK";
break;
default:
break;
}
strcpy(flag, " ");
flag2str(phdr[i].p_flags, flag);
PRINT_PROGRAM(i, tmp, phdr[i].p_offset, phdr[i].p_vaddr, phdr[i].p_paddr, phdr[i].p_filesz, phdr[i].p_memsz, flag, phdr[i].p_align);
}
INFO("Section to segment mapping\n");
for (int i = 0; i < ehdr->e_phnum; i++) {
printf(" [%2d]", i);
for (int j = 0; j < ehdr->e_shnum; j++) {
name = elf_map + shstrtab.sh_offset + shdr[j].sh_name;
if (shdr[j].sh_addr >= phdr[i].p_vaddr && shdr[j].sh_addr + shdr[j].sh_size <= phdr[i].p_vaddr + phdr[i].p_memsz && shdr[j].sh_type != SHT_NULL) {
if (shdr[j].sh_flags >> 1 & 0x1) {
printf(" %s", name);
}
}
}
printf("\n");
}
INFO("Dynamic link information\n");
int dynstr;
int dynamic;
Elf64_Dyn *dyn;
for (int i = 0; i < ehdr->e_shnum; i++) {
name = elf_map + shstrtab.sh_offset + shdr[i].sh_name;
if (!strcmp(name, ".dynstr")) {
dynstr = i;
}
if (!strcmp(name, ".dynamic")) {
dynamic = i;
}
}
char value[50];
name = "";
dyn = (Elf64_Dyn *)&elf_map[shdr[dynamic].sh_offset];
count = shdr[dynamic].sh_size / sizeof(Elf64_Dyn);
INFO("Dynamic section at offset 0x%x contains %d entries\n", shdr[dynamic].sh_offset, count);
PRINT_DYN_TITLE("Tag", "Type", "Name/Value");
for(int i = 0; i < count; i++) {
tmp = "";
memset(value, 0, 50);
snprintf(value, 50, "0x%x", dyn[i].d_un.d_val);
switch (dyn[i].d_tag) {
/* Legal values for d_tag (dynamic entry type). */
case DT_NULL:
tmp = "DT_NULL";
break;
case DT_NEEDED:
tmp = "DT_NEEDED";
name = elf_map + shdr[dynstr].sh_offset + dyn[i].d_un.d_val;
snprintf(value, 50, "Shared library: [%s]", name);
break;
case DT_PLTRELSZ:
tmp = "DT_PLTRELSZ";
break;
case DT_PLTGOT:
tmp = "DT_PLTGOT";
break;
case DT_HASH:
tmp = "DT_HASH";
break;
case DT_STRTAB:
tmp = "DT_STRTAB";
break;
case DT_SYMTAB:
tmp = "DT_SYMTAB";
break;
case DT_RELA:
tmp = "DT_RELA";
break;
case DT_RELASZ:
tmp = "DT_RELASZ";
break;
case DT_RELAENT:
tmp = "DT_RELAENT";
break;
case DT_STRSZ:
tmp = "DT_STRSZ";
break;
case DT_SYMENT:
tmp = "DT_SYMENT";
break;
case DT_INIT:
tmp = "DT_INIT";
break;
case DT_FINI:
tmp = "DT_FINI";
break;
case DT_SONAME:
tmp = "DT_SONAME";
break;
case DT_RPATH:
tmp = "DT_RPATH";
break;
case DT_SYMBOLIC:
tmp = "DT_SYMBOLIC";
break;
case DT_REL:
tmp = "DT_REL";
break;
case DT_RELSZ:
tmp = "DT_RELSZ";
break;
case DT_RELENT:
tmp = "DT_RELENT";
break;
case DT_PLTREL:
tmp = "DT_PLTREL";
break;
case DT_DEBUG:
tmp = "DT_DEBUG";
break;
case DT_TEXTREL:
tmp = "DT_TEXTREL";
break;
case DT_JMPREL:
tmp = "DT_JMPREL";
break;
case DT_BIND_NOW:
tmp = "DT_BIND_NOW";
break;
case DT_INIT_ARRAY:
tmp = "DT_INIT_ARRAY";
break;
case DT_FINI_ARRAY:
tmp = "DT_FINI_ARRAY";
break;
case DT_INIT_ARRAYSZ:
tmp = "DT_INIT_ARRAYSZ";
break;
case DT_FINI_ARRAYSZ:
tmp = "DT_FINI_ARRAYSZ";
break;
case DT_RUNPATH:
tmp = "DT_RUNPATH";
break;
case DT_FLAGS:
tmp = "DT_FLAGS";
snprintf(value, 50, "Flags: %d", dyn[i].d_un.d_val);
break;
case DT_ENCODING:
tmp = "DT_ENCODING";
break;
case DT_PREINIT_ARRAYSZ:
tmp = "DT_PREINIT_ARRAYSZ";
break;
case DT_SYMTAB_SHNDX:
tmp = "DT_SYMTAB_SHNDX";
break;
case DT_NUM:
tmp = "DT_NUM";
break;
case DT_LOOS:
tmp = "DT_LOOS";
break;
case DT_HIOS:
tmp = "DT_HIOS";
break;
case DT_LOPROC:
tmp = "DT_LOPROC";
break;
case DT_HIPROC:
tmp = "DT_HIPROC";
break;
case DT_PROCNUM:
tmp = "DT_LOPROC";
break;
/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
* Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's
* approach. */
case DT_VALRNGLO:
tmp = "DT_VALRNGLO";
break;
case DT_GNU_PRELINKED:
tmp = "DT_GNU_PRELINKED";
break;
case DT_GNU_CONFLICTSZ:
tmp = "DT_GNU_CONFLICTSZ";
break;
case DT_GNU_LIBLISTSZ:
tmp = "DT_GNU_LIBLISTSZ";
break;
case DT_CHECKSUM:
tmp = "DT_CHECKSUM";
break;
case DT_PLTPADSZ:
tmp = "DT_PLTPADSZ";
break;
case DT_MOVEENT:
tmp = "DT_MOVEENT";
break;
case DT_MOVESZ:
tmp = "DT_MOVESZ";
break;
case DT_FEATURE_1:
tmp = "DT_FEATURE_1";
break;
case DT_POSFLAG_1:
tmp = "DT_POSFLAG_1";
break;
case DT_SYMINSZ:
tmp = "DT_SYMINSZ";
break;
case DT_SYMINENT:
tmp = "DT_SYMINENT";
break;
/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
* Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
* If any adjustment is made to the ELF object after it has been
* built these entries will need to be adjusted. */
case DT_ADDRRNGLO:
tmp = "DT_ADDRRNGLO";
break;
case DT_GNU_HASH:
tmp = "DT_GNU_HASH";
break;
case DT_TLSDESC_PLT:
tmp = "DT_TLSDESC_PLT";
break;
case DT_TLSDESC_GOT:
tmp = "DT_TLSDESC_GOT";
break;
case DT_GNU_CONFLICT:
tmp = "DT_GNU_CONFLICT";
break;
case DT_GNU_LIBLIST:
tmp = "DT_GNU_LIBLIST";
break;
case DT_CONFIG:
tmp = "DT_CONFIG";
break;
case DT_DEPAUDIT:
tmp = "DT_DEPAUDIT";
break;
case DT_AUDIT:
tmp = "DT_AUDIT";
break;
case DT_PLTPAD:
tmp = "DT_PLTPAD";
break;
case DT_MOVETAB:
tmp = "DT_MOVETAB";
break;
case DT_SYMINFO:
tmp = "DT_SYMINFO";
break;
/* The versioning entry types. The next are defined as part of the
* GNU extension. */
case DT_VERSYM:
tmp = "DT_VERSYM";
break;
case DT_RELACOUNT:
tmp = "DT_RELACOUNT";
break;
case DT_RELCOUNT:
tmp = "DT_RELCOUNT";
break;
/* These were chosen by Sun. */
case DT_FLAGS_1:
tmp = "DT_FLAGS_1";
switch (dyn[i].d_un.d_val) {
case DF_1_PIE:
snprintf(value, 50, "Flags: %s", "PIE");
break;
default:
snprintf(value, 50, "Flags: %d", dyn[i].d_un.d_val);
break;
}
break;
case DT_VERDEF:
tmp = "DT_VERDEF";
break;
case DT_VERDEFNUM:
tmp = "DT_VERDEFNUM";
break;
case DT_VERNEED:
tmp = "DT_VERNEED";
break;
case DT_VERNEEDNUM:
tmp = "DT_VERNEEDNUM";
break;
default:
break;
}
PRINT_DYN(dyn[i].d_tag, tmp, value);
}
}
return 0;
}
|
10492118832219739041814229234332696880
|
None
|
CWE-125
|
CVE-2022-21711
|
elfspirit is an ELF static analysis and injection framework that parses, manipulates, and camouflages ELF files. When analyzing the ELF file format in versions prior to 1.1, there is an out-of-bounds read bug, which can lead to application crashes or information leakage. By constructing a special format ELF file, the information of any address can be leaked. elfspirit version 1.1 contains a patch for this issue.
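The fixed record below guards each section-name lookup with validated_offset(name, elf_map, elf_map + st.st_size) before dereferencing it. A sketch of that kind of bounds check follows; the real helper's implementation is not shown in this record, so the function below is an illustrative assumption (its name differs deliberately). A pointer computed from attacker-controlled sh_offset and sh_name values may only be read if it falls inside the mmap'ed file image, and a complete check would additionally require a NUL terminator before the end of the mapping.

#include <stddef.h>
#include <stdint.h>

/* Nonzero when 'p' points inside the mapped file [map, map + size).
   Illustrative only; elfspirit's validated_offset() may differ. */
static int offset_in_map(const char *p, const uint8_t *map, size_t size)
{
    const uint8_t *q = (const uint8_t *) p;
    return q >= map && q < map + size;
}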
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21711
|
|
522,438
|
elfspirit
|
c5b0f5a9a24f2451bbeda4751d67633bc375e608
|
https://github.com/liyansong2018/elfspirit
|
https://github.com/liyansong2018/elfspirit/commit/c5b0f5a9a24f2451bbeda4751d67633bc375e608
|
Fix #1 about out-of-bounds
| 0
|
int parse(char *elf) {
int fd;
struct stat st;
uint8_t *elf_map;
int count;
char *tmp;
char *name;
char flag[4];
MODE = get_elf_class(elf);
fd = open(elf, O_RDONLY);
if (fd < 0) {
perror("open");
return -1;
}
if (fstat(fd, &st) < 0) {
perror("fstat");
return -1;
}
elf_map = mmap(0, st.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (elf_map == MAP_FAILED) {
perror("mmap");
return -1;
}
/* 32bit */
if (MODE == ELFCLASS32) {
/* ELF Header Information */
Elf32_Ehdr *ehdr;
ehdr = (Elf32_Ehdr *)elf_map;
INFO("ELF Header\n");
switch (ehdr->e_type) {
case ET_NONE:
tmp = "An unknown type";
break;
case ET_REL:
tmp = "A relocatable file";
break;
case ET_EXEC:
tmp = "An executable file";
break;
case ET_DYN:
tmp = "A shared object";
break;
case ET_CORE:
tmp = "A core file";
break;
default:
tmp = "An unknown type";
break;
}
PRINT_HEADER_EXP("e_type:", ehdr->e_type, tmp);
switch (ehdr->e_machine) {
case EM_NONE:
tmp = "An unknown machine";
break;
case EM_M32:
tmp = "AT&T WE 32100";
break;
case EM_SPARC:
tmp = "Sun Microsystems SPARC";
break;
case EM_386:
tmp = "Intel 80386";
break;
case EM_68K:
tmp = "Motorola 68000";
break;
case EM_88K:
tmp = "Motorola 88000";
break;
case EM_860:
tmp = "Intel 80860";
break;
case EM_MIPS:
tmp = "MIPS RS3000 (big-endian only)";
break;
case EM_PARISC:
tmp = "HP/PA";
break;
case EM_SPARC32PLUS:
tmp = "SPARC with enhanced instruction set";
break;
case EM_PPC:
tmp = "PowerPC";
break;
case EM_PPC64:
tmp = "PowerPC 64-bit";
break;
case EM_S390:
tmp = "IBM S/390";
break;
case EM_ARM:
tmp = "Advanced RISC Machines";
break;
case EM_SH:
tmp = "Renesas SuperH";
break;
case EM_SPARCV9:
tmp = "SPARC v9 64-bit";
break;
case EM_IA_64:
tmp = "Intel Itanium";
break;
case EM_X86_64:
tmp = "AMD x86-64";
break;
case EM_VAX:
tmp = "DEC Vax";
break;
default:
tmp = "An unknown machine";
break;
}
PRINT_HEADER_EXP("e_machine:", ehdr->e_machine, tmp);
switch (ehdr->e_version) {
case EV_NONE:
tmp = "Invalid version";
break;
case EV_CURRENT:
tmp = "Current version";
break;
default:
tmp = "Known version";
break;
}
PRINT_HEADER_EXP("e_version:", ehdr->e_version, tmp);
PRINT_HEADER("e_entry:", ehdr->e_entry);
PRINT_HEADER("e_phoff:", ehdr->e_phoff);
PRINT_HEADER("e_shoff:", ehdr->e_shoff);
PRINT_HEADER("e_flags:", ehdr->e_flags);
PRINT_HEADER("e_ehsize:", ehdr->e_ehsize);
PRINT_HEADER("e_phentsize:", ehdr->e_phentsize);
PRINT_HEADER("e_phnum:", ehdr->e_phnum);
PRINT_HEADER("e_shentsize:", ehdr->e_shentsize);
PRINT_HEADER("e_shnum:", ehdr->e_shnum);
PRINT_HEADER("e_shstrndx:", ehdr->e_shstrndx);
/* Section Information */
Elf32_Shdr *shdr;
Elf32_Phdr *phdr;
Elf32_Shdr shstrtab;
shdr = (Elf32_Shdr *)&elf_map[ehdr->e_shoff];
phdr = (Elf32_Phdr *)&elf_map[ehdr->e_phoff];
shstrtab = shdr[ehdr->e_shstrndx];
INFO("Section Header Table\n");
PRINT_SECTION_TITLE("Nr", "Name", "Type", "Addr", "Off", "Size", "Es", "Flg", "Lk", "Inf", "Al");
for (int i = 0; i < ehdr->e_shnum; i++) {
name = elf_map + shstrtab.sh_offset + shdr[i].sh_name;
if (validated_offset(name, elf_map, elf_map + st.st_size)) {
ERROR("Corrupt file format\n");
return -1;
}
switch (shdr[i].sh_type) {
case SHT_NULL:
tmp = "SHT_NULL";
break;
case SHT_PROGBITS:
tmp = "SHT_PROGBITS";
break;
case SHT_SYMTAB:
tmp = "SHT_SYMTAB";
break;
case SHT_STRTAB:
tmp = "SHT_STRTAB";
break;
case SHT_RELA:
tmp = "SHT_RELA";
break;
case SHT_HASH:
tmp = "SHT_HASH";
break;
case SHT_DYNAMIC:
tmp = "SHT_DYNAMIC";
break;
case SHT_NOTE:
tmp = "SHT_NOTE";
break;
case SHT_NOBITS:
tmp = "SHT_NOBITS";
break;
case SHT_REL:
tmp = "SHT_REL";
break;
case SHT_SHLIB:
tmp = "SHT_SHLIB";
break;
case SHT_DYNSYM:
tmp = "SHT_DYNSYM";
break;
case SHT_LOPROC:
tmp = "SHT_LOPROC";
break;
case SHT_HIPROC:
tmp = "SHT_HIPROC";
break;
case SHT_LOUSER:
tmp = "SHT_LOUSER";
break;
case SHT_HIUSER:
tmp = "SHT_HIUSER";
break;
default:
break;
}
if (strlen(name) > 15) {
strcpy(&name[15 - 6], "[...]");
}
strcpy(flag, " ");
flag2str_sh(shdr[i].sh_flags, flag);
PRINT_SECTION(i, name, tmp, shdr[i].sh_addr, shdr[i].sh_offset, shdr[i].sh_size, shdr[i].sh_entsize, \
flag, shdr[i].sh_link, shdr[i].sh_info, shdr[i].sh_addralign);
}
INFO("Program Header Table\n");
PRINT_PROGRAM_TITLE("Nr", "Type", "Offset", "Virtaddr", "Physaddr", "Filesiz", "Memsiz", "Flg", "Align");
for (int i = 0; i < ehdr->e_phnum; i++) {
switch (phdr[i].p_type) {
case PT_NULL:
tmp = "PT_NULL";
break;
case PT_LOAD:
tmp = "PT_LOAD";
break;
case PT_DYNAMIC:
tmp = "PT_DYNAMIC";
break;
case PT_INTERP:
tmp = "PT_INTERP";
break;
case PT_NOTE:
tmp = "PT_NOTE";
break;
case PT_SHLIB:
tmp = "PT_SHLIB";
break;
case PT_PHDR:
tmp = "PT_PHDR";
break;
case PT_LOPROC:
tmp = "PT_LOPROC";
break;
case PT_HIPROC:
tmp = "PT_HIPROC";
break;
case PT_GNU_STACK:
tmp = "PT_GNU_STACK";
break;
default:
break;
}
strcpy(flag, " ");
flag2str(phdr[i].p_flags, flag);
PRINT_PROGRAM(i, tmp, phdr[i].p_offset, phdr[i].p_vaddr, phdr[i].p_paddr, phdr[i].p_filesz, phdr[i].p_memsz, flag, phdr[i].p_align);
}
INFO("Section to segment mapping\n");
for (int i = 0; i < ehdr->e_phnum; i++) {
printf(" [%2d]", i);
for (int j = 0; j < ehdr->e_shnum; j++) {
name = elf_map + shstrtab.sh_offset + shdr[j].sh_name;
if (shdr[j].sh_addr >= phdr[i].p_vaddr && shdr[j].sh_addr + shdr[j].sh_size <= phdr[i].p_vaddr + phdr[i].p_memsz && shdr[j].sh_type != SHT_NULL) {
if (shdr[j].sh_flags >> 1 & 0x1) {
printf(" %s", name);
}
}
}
printf("\n");
}
INFO("Dynamic link information\n");
int dynstr;
int dynamic;
Elf32_Dyn *dyn;
for (int i = 0; i < ehdr->e_shnum; i++) {
name = elf_map + shstrtab.sh_offset + shdr[i].sh_name;
if (!strcmp(name, ".dynstr")) {
dynstr = i;
}
if (!strcmp(name, ".dynamic")) {
dynamic = i;
}
}
char value[50];
name = "";
dyn = (Elf32_Dyn *)&elf_map[shdr[dynamic].sh_offset];
count = shdr[dynamic].sh_size / sizeof(Elf32_Dyn);
INFO("Dynamic section at offset 0x%x contains %d entries\n", shdr[dynamic].sh_offset, count);
PRINT_DYN_TITLE("Tag", "Type", "Name/Value");
for(int i = 0; i < count; i++) {
tmp = "";
memset(value, 0, 50);
snprintf(value, 50, "0x%x", dyn[i].d_un.d_val);
switch (dyn[i].d_tag) {
/* Legal values for d_tag (dynamic entry type). */
case DT_NULL:
tmp = "DT_NULL";
break;
case DT_NEEDED:
tmp = "DT_NEEDED";
name = elf_map + shdr[dynstr].sh_offset + dyn[i].d_un.d_val;
snprintf(value, 50, "Shared library: [%s]", name);
break;
case DT_PLTRELSZ:
tmp = "DT_PLTRELSZ";
break;
case DT_PLTGOT:
tmp = "DT_PLTGOT";
break;
case DT_HASH:
tmp = "DT_HASH";
break;
case DT_STRTAB:
tmp = "DT_STRTAB";
break;
case DT_SYMTAB:
tmp = "DT_SYMTAB";
break;
case DT_RELA:
tmp = "DT_RELA";
break;
case DT_RELASZ:
tmp = "DT_RELASZ";
break;
case DT_RELAENT:
tmp = "DT_RELAENT";
break;
case DT_STRSZ:
tmp = "DT_STRSZ";
break;
case DT_SYMENT:
tmp = "DT_SYMENT";
break;
case DT_INIT:
tmp = "DT_INIT";
break;
case DT_FINI:
tmp = "DT_FINI";
break;
case DT_SONAME:
tmp = "DT_SONAME";
break;
case DT_RPATH:
tmp = "DT_RPATH";
break;
case DT_SYMBOLIC:
tmp = "DT_SYMBOLIC";
break;
case DT_REL:
tmp = "DT_REL";
break;
case DT_RELSZ:
tmp = "DT_RELSZ";
break;
case DT_RELENT:
tmp = "DT_RELENT";
break;
case DT_PLTREL:
tmp = "DT_PLTREL";
break;
case DT_DEBUG:
tmp = "DT_DEBUG";
break;
case DT_TEXTREL:
tmp = "DT_TEXTREL";
break;
case DT_JMPREL:
tmp = "DT_JMPREL";
break;
case DT_BIND_NOW:
tmp = "DT_BIND_NOW";
break;
case DT_INIT_ARRAY:
tmp = "DT_INIT_ARRAY";
break;
case DT_FINI_ARRAY:
tmp = "DT_FINI_ARRAY";
break;
case DT_INIT_ARRAYSZ:
tmp = "DT_INIT_ARRAYSZ";
break;
case DT_FINI_ARRAYSZ:
tmp = "DT_FINI_ARRAYSZ";
break;
case DT_RUNPATH:
tmp = "DT_RUNPATH";
break;
case DT_FLAGS:
tmp = "DT_FLAGS";
snprintf(value, 50, "Flags: %d", dyn[i].d_un.d_val);
break;
case DT_ENCODING:
tmp = "DT_ENCODING";
break;
case DT_PREINIT_ARRAYSZ:
tmp = "DT_PREINIT_ARRAYSZ";
break;
case DT_SYMTAB_SHNDX:
tmp = "DT_SYMTAB_SHNDX";
break;
case DT_NUM:
tmp = "DT_NUM";
break;
case DT_LOOS:
tmp = "DT_LOOS";
break;
case DT_HIOS:
tmp = "DT_HIOS";
break;
case DT_LOPROC:
tmp = "DT_LOPROC";
break;
case DT_HIPROC:
tmp = "DT_HIPROC";
break;
case DT_PROCNUM:
tmp = "DT_LOPROC";
break;
/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
* Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's
* approach. */
case DT_VALRNGLO:
tmp = "DT_VALRNGLO";
break;
case DT_GNU_PRELINKED:
tmp = "DT_GNU_PRELINKED";
break;
case DT_GNU_CONFLICTSZ:
tmp = "DT_GNU_CONFLICTSZ";
break;
case DT_GNU_LIBLISTSZ:
tmp = "DT_GNU_LIBLISTSZ";
break;
case DT_CHECKSUM:
tmp = "DT_CHECKSUM";
break;
case DT_PLTPADSZ:
tmp = "DT_PLTPADSZ";
break;
case DT_MOVEENT:
tmp = "DT_MOVEENT";
break;
case DT_MOVESZ:
tmp = "DT_MOVESZ";
break;
case DT_FEATURE_1:
tmp = "DT_FEATURE_1";
break;
case DT_POSFLAG_1:
tmp = "DT_POSFLAG_1";
break;
case DT_SYMINSZ:
tmp = "DT_SYMINSZ";
break;
case DT_SYMINENT:
tmp = "DT_SYMINENT";
break;
/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
* Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
* If any adjustment is made to the ELF object after it has been
* built these entries will need to be adjusted. */
case DT_ADDRRNGLO:
tmp = "DT_ADDRRNGLO";
break;
case DT_GNU_HASH:
tmp = "DT_GNU_HASH";
break;
case DT_TLSDESC_PLT:
tmp = "DT_TLSDESC_PLT";
break;
case DT_TLSDESC_GOT:
tmp = "DT_TLSDESC_GOT";
break;
case DT_GNU_CONFLICT:
tmp = "DT_GNU_CONFLICT";
break;
case DT_GNU_LIBLIST:
tmp = "DT_GNU_LIBLIST";
break;
case DT_CONFIG:
tmp = "DT_CONFIG";
break;
case DT_DEPAUDIT:
tmp = "DT_DEPAUDIT";
break;
case DT_AUDIT:
tmp = "DT_AUDIT";
break;
case DT_PLTPAD:
tmp = "DT_PLTPAD";
break;
case DT_MOVETAB:
tmp = "DT_MOVETAB";
break;
case DT_SYMINFO:
tmp = "DT_SYMINFO";
break;
/* The versioning entry types. The next are defined as part of the
* GNU extension. */
case DT_VERSYM:
tmp = "DT_VERSYM";
break;
case DT_RELACOUNT:
tmp = "DT_RELACOUNT";
break;
case DT_RELCOUNT:
tmp = "DT_RELCOUNT";
break;
/* These were chosen by Sun. */
case DT_FLAGS_1:
tmp = "DT_FLAGS_1";
switch (dyn[i].d_un.d_val) {
case DF_1_PIE:
snprintf(value, 50, "Flags: %s", "PIE");
break;
default:
snprintf(value, 50, "Flags: %d", dyn[i].d_un.d_val);
break;
}
break;
case DT_VERDEF:
tmp = "DT_VERDEF";
break;
case DT_VERDEFNUM:
tmp = "DT_VERDEFNUM";
break;
case DT_VERNEED:
tmp = "DT_VERNEED";
break;
case DT_VERNEEDNUM:
tmp = "DT_VERNEEDNUM";
break;
default:
break;
}
PRINT_DYN(dyn[i].d_tag, tmp, value);
}
}
/* 64bit */
if (MODE == ELFCLASS64) {
/* ELF Header Information */
Elf64_Ehdr *ehdr;
ehdr = (Elf64_Ehdr *)elf_map;
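/* Reinterpret the raw mapping as a 64-bit ELF header; every field printed below is taken from the file as-is. */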
INFO("ELF Header\n");
switch (ehdr->e_type) {
case ET_NONE:
tmp = "An unknown type";
break;
case ET_REL:
tmp = "A relocatable file";
break;
case ET_EXEC:
tmp = "An executable file";
break;
case ET_DYN:
tmp = "A shared object";
break;
case ET_CORE:
tmp = "A core file";
break;
default:
tmp = "An unknown type";
break;
}
PRINT_HEADER_EXP("e_type:", ehdr->e_type, tmp);
switch (ehdr->e_machine) {
case EM_NONE:
tmp = "An unknown machine";
break;
case EM_M32:
tmp = "AT&T WE 32100";
break;
case EM_SPARC:
tmp = "Sun Microsystems SPARC";
break;
case EM_386:
tmp = "Intel 80386";
break;
case EM_68K:
tmp = "Motorola 68000";
break;
case EM_88K:
tmp = "Motorola 88000";
break;
case EM_860:
tmp = "Intel 80860";
break;
case EM_MIPS:
tmp = "MIPS RS3000 (big-endian only)";
break;
case EM_PARISC:
tmp = "HP/PA";
break;
case EM_SPARC32PLUS:
tmp = "SPARC with enhanced instruction set";
break;
case EM_PPC:
tmp = "PowerPC";
break;
case EM_PPC64:
tmp = "PowerPC 64-bit";
break;
case EM_S390:
tmp = "IBM S/390";
break;
case EM_ARM:
tmp = "Advanced RISC Machines";
break;
case EM_SH:
tmp = "Renesas SuperH";
break;
case EM_SPARCV9:
tmp = "SPARC v9 64-bit";
break;
case EM_IA_64:
tmp = "Intel Itanium";
break;
case EM_X86_64:
tmp = "AMD x86-64";
break;
case EM_VAX:
tmp = "DEC Vax";
break;
default:
tmp = "An unknown machine";
break;
}
PRINT_HEADER_EXP("e_machine:", ehdr->e_machine, tmp);
switch (ehdr->e_version) {
case EV_NONE:
tmp = "Invalid version";
break;
case EV_CURRENT:
tmp = "Current version";
break;
default:
tmp = "Known version";
break;
}
PRINT_HEADER_EXP("e_version:", ehdr->e_version, tmp);
PRINT_HEADER("e_entry:", ehdr->e_entry);
PRINT_HEADER("e_phoff:", ehdr->e_phoff);
PRINT_HEADER("e_shoff:", ehdr->e_shoff);
PRINT_HEADER("e_flags:", ehdr->e_flags);
PRINT_HEADER("e_ehsize:", ehdr->e_ehsize);
PRINT_HEADER("e_phentsize:", ehdr->e_phentsize);
PRINT_HEADER("e_phnum:", ehdr->e_phnum);
PRINT_HEADER("e_shentsize:", ehdr->e_shentsize);
PRINT_HEADER("e_shentsize:", ehdr->e_shentsize);
PRINT_HEADER("e_shstrndx:", ehdr->e_shstrndx);
/* Section Information */
Elf64_Shdr *shdr;
Elf64_Phdr *phdr;
Elf64_Shdr shstrtab;
shdr = (Elf64_Shdr *)&elf_map[ehdr->e_shoff];
phdr = (Elf64_Phdr *)&elf_map[ehdr->e_phoff];
shstrtab = shdr[ehdr->e_shstrndx];
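/* e_shoff, e_phoff and e_shstrndx come straight from the file; nothing has range-checked them before they are used to index the mapping here. */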
INFO("Section Header Table\n");
PRINT_SECTION_TITLE("Nr", "Name", "Type", "Addr", "Off", "Size", "Es", "Flg", "Lk", "Inf", "Al");
for (int i = 0; i < ehdr->e_shnum; i++) {
name = elf_map + shstrtab.sh_offset + shdr[i].sh_name;
if (validated_offset(name, elf_map, elf_map + st.st_size)) {
ERROR("Corrupt file format\n");
return -1;
}
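/* This is the only place a name offset is validated against the mapping; the later name computations (segment mapping, .dynstr/.dynamic lookup, DT_NEEDED) skip this check. */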
switch (shdr[i].sh_type) {
case SHT_NULL:
tmp = "SHT_NULL";
break;
case SHT_PROGBITS:
tmp = "SHT_PROGBITS";
break;
case SHT_SYMTAB:
tmp = "SHT_SYMTAB";
break;
case SHT_STRTAB:
tmp = "SHT_STRTAB";
break;
case SHT_RELA:
tmp = "SHT_RELA";
break;
case SHT_HASH:
tmp = "SHT_HASH";
break;
case SHT_DYNAMIC:
tmp = "SHT_DYNAMIC";
break;
case SHT_NOTE:
tmp = "SHT_NOTE";
break;
case SHT_NOBITS:
tmp = "SHT_NOBITS";
break;
case SHT_REL:
tmp = "SHT_REL";
break;
case SHT_SHLIB:
tmp = "SHT_SHLIB";
break;
case SHT_DYNSYM:
tmp = "SHT_DYNSYM";
break;
case SHT_LOPROC:
tmp = "SHT_LOPROC";
break;
case SHT_HIPROC:
tmp = "SHT_HIPROC";
break;
case SHT_LOUSER:
tmp = "SHT_LOUSER";
break;
case SHT_HIUSER:
tmp = "SHT_HIUSER";
break;
default:
tmp = ""; /* do not reuse the previous section's type name */
break;
}
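/* Truncate long names for display; note the strcpy below writes "[...]" into the mapped file image itself, which assumes a private, writable mapping. */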
if (strlen(name) > 15) {
strcpy(&name[15 - 6], "[...]");
}
strcpy(flag, " ");
flag2str_sh(shdr[i].sh_flags, flag);
PRINT_SECTION(i, name, tmp, shdr[i].sh_addr, shdr[i].sh_offset, shdr[i].sh_size, shdr[i].sh_entsize, \
flag, shdr[i].sh_link, shdr[i].sh_info, shdr[i].sh_addralign);
}
INFO("Program Header Table\n");
PRINT_PROGRAM_TITLE("Nr", "Type", "Offset", "Virtaddr", "Physaddr", "Filesiz", "Memsiz", "Flg", "Align");
for (int i = 0; i < ehdr->e_phnum; i++) {
switch (phdr[i].p_type) {
case PT_NULL:
tmp = "PT_NULL";
break;
case PT_LOAD:
tmp = "PT_LOAD";
break;
case PT_DYNAMIC:
tmp = "PT_DYNAMIC";
break;
case PT_INTERP:
tmp = "PT_INTERP";
break;
case PT_NOTE:
tmp = "PT_NOTE";
break;
case PT_SHLIB:
tmp = "PT_SHLIB";
break;
case PT_PHDR:
tmp = "PT_PHDR";
break;
case PT_LOPROC:
tmp = "PT_LOPROC";
break;
case PT_HIPROC:
tmp = "PT_HIPROC";
break;
case PT_GNU_STACK:
tmp = "PT_GNU_STACK";
break;
default:
tmp = ""; /* do not reuse the previous segment's type name */
break;
}
strcpy(flag, " ");
flag2str(phdr[i].p_flags, flag);
PRINT_PROGRAM(i, tmp, phdr[i].p_offset, phdr[i].p_vaddr, phdr[i].p_paddr, phdr[i].p_filesz, phdr[i].p_memsz, flag, phdr[i].p_align);
}
INFO("Section to segment mapping\n");
for (int i = 0; i < ehdr->e_phnum; i++) {
printf(" [%2d]", i);
for (int j = 0; j < ehdr->e_shnum; j++) {
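/* name is again base + file-supplied offsets with no bounds check; only sections with SHF_ALLOC (sh_flags bit 1) that fit inside the segment are listed. */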
name = elf_map + shstrtab.sh_offset + shdr[j].sh_name;
if (shdr[j].sh_addr >= phdr[i].p_vaddr && shdr[j].sh_addr + shdr[j].sh_size <= phdr[i].p_vaddr + phdr[i].p_memsz && shdr[j].sh_type != SHT_NULL) {
if (shdr[j].sh_flags >> 1 & 0x1) {
printf(" %s", name);
}
}
}
printf("\n");
}
INFO("Dynamic link information\n");
int dynstr = -1;
int dynamic = -1;
Elf64_Dyn *dyn;
for (int i = 0; i < ehdr->e_shnum; i++) {
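/* Find .dynstr/.dynamic by name; the sh_name offset is not validated on this pass either. */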
name = elf_map + shstrtab.sh_offset + shdr[i].sh_name;
if (!strcmp(name, ".dynstr")) {
dynstr = i;
}
if (!strcmp(name, ".dynamic")) {
dynamic = i;
}
}
if (dynstr < 0 || dynamic < 0) {
ERROR("Missing .dynstr or .dynamic section\n");
return -1;
}
char value[50];
name = "";
dyn = (Elf64_Dyn *)&elf_map[shdr[dynamic].sh_offset];
count = shdr[dynamic].sh_size / sizeof(Elf64_Dyn);
INFO("Dynamic section at offset 0x%x contains %d entries\n", shdr[dynamic].sh_offset, count);
PRINT_DYN_TITLE("Tag", "Type", "Name/Value");
for(int i = 0; i < count; i++) {
tmp = "";
memset(value, 0, 50);
snprintf(value, 50, "0x%x", dyn[i].d_un.d_val);
switch (dyn[i].d_tag) {
/* Legal values for d_tag (dynamic entry type). */
case DT_NULL:
tmp = "DT_NULL";
break;
case DT_NEEDED:
tmp = "DT_NEEDED";
name = elf_map + shdr[dynstr].sh_offset + dyn[i].d_un.d_val;
snprintf(value, 50, "Shared library: [%s]", name);
break;
case DT_PLTRELSZ:
tmp = "DT_PLTRELSZ";
break;
case DT_PLTGOT:
tmp = "DT_PLTGOT";
break;
case DT_HASH:
tmp = "DT_HASH";
break;
case DT_STRTAB:
tmp = "DT_STRTAB";
break;
case DT_SYMTAB:
tmp = "DT_SYMTAB";
break;
case DT_RELA:
tmp = "DT_RELA";
break;
case DT_RELASZ:
tmp = "DT_RELASZ";
break;
case DT_RELAENT:
tmp = "DT_RELAENT";
break;
case DT_STRSZ:
tmp = "DT_STRSZ";
break;
case DT_SYMENT:
tmp = "DT_SYMENT";
break;
case DT_INIT:
tmp = "DT_INIT";
break;
case DT_FINI:
tmp = "DT_FINI";
break;
case DT_SONAME:
tmp = "DT_SONAME";
break;
case DT_RPATH:
tmp = "DT_RPATH";
break;
case DT_SYMBOLIC:
tmp = "DT_SYMBOLIC";
break;
case DT_REL:
tmp = "DT_REL";
break;
case DT_RELSZ:
tmp = "DT_RELSZ";
break;
case DT_RELENT:
tmp = "DT_RELENT";
break;
case DT_PLTREL:
tmp = "DT_PLTREL";
break;
case DT_DEBUG:
tmp = "DT_DEBUG";
break;
case DT_TEXTREL:
tmp = "DT_TEXTREL";
break;
case DT_JMPREL:
tmp = "DT_JMPREL";
break;
case DT_BIND_NOW:
tmp = "DT_BIND_NOW";
break;
case DT_INIT_ARRAY:
tmp = "DT_INIT_ARRAY";
break;
case DT_FINI_ARRAY:
tmp = "DT_FINI_ARRAY";
break;
case DT_INIT_ARRAYSZ:
tmp = "DT_INIT_ARRAYSZ";
break;
case DT_FINI_ARRAYSZ:
tmp = "DT_FINI_ARRAYSZ";
break;
case DT_RUNPATH:
tmp = "DT_RUNPATH";
break;
case DT_FLAGS:
tmp = "DT_FLAGS";
snprintf(value, 50, "Flags: %lu", dyn[i].d_un.d_val);
break;
case DT_ENCODING:
tmp = "DT_ENCODING";
break;
case DT_PREINIT_ARRAYSZ:
tmp = "DT_PREINIT_ARRAYSZ";
break;
case DT_SYMTAB_SHNDX:
tmp = "DT_SYMTAB_SHNDX";
break;
case DT_NUM:
tmp = "DT_NUM";
break;
case DT_LOOS:
tmp = "DT_LOOS";
break;
case DT_HIOS:
tmp = "DT_HIOS";
break;
case DT_LOPROC:
tmp = "DT_LOPROC";
break;
case DT_HIPROC:
tmp = "DT_HIPROC";
break;
case DT_PROCNUM:
tmp = "DT_LOPROC";
break;
/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
* Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's
* approach. */
case DT_VALRNGLO:
tmp = "DT_VALRNGLO";
break;
case DT_GNU_PRELINKED:
tmp = "DT_GNU_PRELINKED";
break;
case DT_GNU_CONFLICTSZ:
tmp = "DT_GNU_CONFLICTSZ";
break;
case DT_GNU_LIBLISTSZ:
tmp = "DT_GNU_LIBLISTSZ";
break;
case DT_CHECKSUM:
tmp = "DT_CHECKSUM";
break;
case DT_PLTPADSZ:
tmp = "DT_PLTPADSZ";
break;
case DT_MOVEENT:
tmp = "DT_MOVEENT";
break;
case DT_MOVESZ:
tmp = "DT_MOVESZ";
break;
case DT_FEATURE_1:
tmp = "DT_FEATURE_1";
break;
case DT_POSFLAG_1:
tmp = "DT_POSFLAG_1";
break;
case DT_SYMINSZ:
tmp = "DT_SYMINSZ";
break;
case DT_SYMINENT:
tmp = "DT_SYMINENT";
break;
/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
* Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
* If any adjustment is made to the ELF object after it has been
* built these entries will need to be adjusted. */
case DT_ADDRRNGLO:
tmp = "DT_ADDRRNGLO";
break;
case DT_GNU_HASH:
tmp = "DT_GNU_HASH";
break;
case DT_TLSDESC_PLT:
tmp = "DT_TLSDESC_PLT";
break;
case DT_TLSDESC_GOT:
tmp = "DT_TLSDESC_GOT";
break;
case DT_GNU_CONFLICT:
tmp = "DT_GNU_CONFLICT";
break;
case DT_GNU_LIBLIST:
tmp = "DT_GNU_LIBLIST";
break;
case DT_CONFIG:
tmp = "DT_CONFIG";
break;
case DT_DEPAUDIT:
tmp = "DT_DEPAUDIT";
break;
case DT_AUDIT:
tmp = "DT_AUDIT";
break;
case DT_PLTPAD:
tmp = "DT_PLTPAD";
break;
case DT_MOVETAB:
tmp = "DT_MOVETAB";
break;
case DT_SYMINFO:
tmp = "DT_SYMINFO";
break;
/* The versioning entry types. The next are defined as part of the
* GNU extension. */
case DT_VERSYM:
tmp = "DT_VERSYM";
break;
case DT_RELACOUNT:
tmp = "DT_RELACOUNT";
break;
case DT_RELCOUNT:
tmp = "DT_RELCOUNT";
break;
/* These were chosen by Sun. */
case DT_FLAGS_1:
tmp = "DT_FLAGS_1";
switch (dyn[i].d_un.d_val) {
case DF_1_PIE:
snprintf(value, 50, "Flags: %s", "PIE");
break;
default:
snprintf(value, 50, "Flags: %lu", dyn[i].d_un.d_val);
break;
}
break;
case DT_VERDEF:
tmp = "DT_VERDEF";
break;
case DT_VERDEFNUM:
tmp = "DT_VERDEFNUM";
break;
case DT_VERNEED:
tmp = "DT_VERNEED";
break;
case DT_VERNEEDNUM:
tmp = "DT_VERNEEDNUM";
break;
default:
break;
}
PRINT_DYN(dyn[i].d_tag, tmp, value);
}
}
return 0;
}
|
212625338852409612714470845997465821390
|
None
|
CWE-125
|
CVE-2022-21711
|
elfspirit is an ELF static analysis and injection framework that parses, manipulates, and camouflages ELF files. When analyzing the ELF file format in versions prior to 1.1, there is an out-of-bounds read bug, which can lead to application crashes or information leakage. By constructing a special format ELF file, the information of any address can be leaked. elfspirit version 1.1 contains a patch for this issue.
|
https://nvd.nist.gov/vuln/detail/CVE-2022-21711
|
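The CVE description above comes down to pointer arithmetic on file-controlled offsets (sh_name, sh_offset, d_un.d_val) that the function dereferences without checking against the mmap'ed image. Below is a minimal sketch of the missing bounds check, modeled loosely on the one validated_offset() call the function does make; the helper name and exact semantics are assumptions for illustration, not elfspirit's actual 1.1 fix.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: resolve a string at a file-controlled offset, or
 * return NULL when the offset, or the string it points at, would escape
 * the [elf_map, elf_map + file_size) mapping. */
static const char *bounded_str(const char *elf_map, size_t file_size, uint64_t off)
{
    if (off >= file_size)        /* reject out-of-range offsets up front */
        return NULL;
    for (const char *q = elf_map + off; q < elf_map + file_size; q++)
        if (*q == '\0')          /* string must terminate inside the file */
            return elf_map + off;
    return NULL;                 /* unterminated: would read past the map */
}

Applied to the DT_NEEDED case, for instance, the sum shdr[dynstr].sh_offset + dyn[i].d_un.d_val would be checked for overflow first and then resolved through such a helper, printing a placeholder instead of dereferencing when it returns NULL.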