Commit

Merge branch 'master' into yunyad-remove-redundancy
yunyad authored Jan 14, 2025
2 parents 8ca0f0b + 36e8752 commit ad0af11
Showing 50 changed files with 2,322 additions and 1,851 deletions.

@@ -8,11 +8,12 @@ import edu.uci.ics.amber.core.virtualidentity.ActorVirtualIdentity
import edu.uci.ics.texera.dao.SqlServer
import edu.uci.ics.texera.dao.SqlServer.withTransaction
import edu.uci.ics.texera.dao.jooq.generated.Tables.{
OPERATOR_EXECUTIONS,
OPERATOR_RUNTIME_STATISTICS,
WORKFLOW_EXECUTIONS,
WORKFLOW_RUNTIME_STATISTICS,
WORKFLOW_VERSION
}
import edu.uci.ics.texera.dao.jooq.generated.tables.pojos.WorkflowRuntimeStatistics
import edu.uci.ics.texera.web.resource.dashboard.user.workflow.WorkflowExecutionsResource.WorkflowRuntimeStatistics
import org.jooq.types.UInteger

import scala.jdk.CollectionConverters.ListHasAsScala

@@ -99,46 +100,48 @@ class DefaultCostEstimator(
val widAsUInteger = UInteger.valueOf(wid)
val rawStats = context
.select(
WORKFLOW_RUNTIME_STATISTICS.OPERATOR_ID,
WORKFLOW_RUNTIME_STATISTICS.TIME,
WORKFLOW_RUNTIME_STATISTICS.DATA_PROCESSING_TIME,
WORKFLOW_RUNTIME_STATISTICS.CONTROL_PROCESSING_TIME,
WORKFLOW_RUNTIME_STATISTICS.EXECUTION_ID
OPERATOR_EXECUTIONS.OPERATOR_ID,
OPERATOR_RUNTIME_STATISTICS.INPUT_TUPLE_CNT,
OPERATOR_RUNTIME_STATISTICS.OUTPUT_TUPLE_CNT,
OPERATOR_RUNTIME_STATISTICS.TIME,
OPERATOR_RUNTIME_STATISTICS.DATA_PROCESSING_TIME,
OPERATOR_RUNTIME_STATISTICS.CONTROL_PROCESSING_TIME,
OPERATOR_RUNTIME_STATISTICS.IDLE_TIME,
OPERATOR_RUNTIME_STATISTICS.NUM_WORKERS
)
.from(OPERATOR_EXECUTIONS)
.join(OPERATOR_RUNTIME_STATISTICS)
.on(
OPERATOR_EXECUTIONS.OPERATOR_EXECUTION_ID.eq(
OPERATOR_RUNTIME_STATISTICS.OPERATOR_EXECUTION_ID
)
)
.from(WORKFLOW_RUNTIME_STATISTICS)
.where(
WORKFLOW_RUNTIME_STATISTICS.WORKFLOW_ID
.eq(widAsUInteger)
.and(
WORKFLOW_RUNTIME_STATISTICS.EXECUTION_ID.eq(
context
.select(
WORKFLOW_EXECUTIONS.EID
)
.from(WORKFLOW_EXECUTIONS)
.join(WORKFLOW_VERSION)
.on(WORKFLOW_VERSION.VID.eq(WORKFLOW_EXECUTIONS.VID))
.where(
WORKFLOW_VERSION.WID
.eq(widAsUInteger)
.and(WORKFLOW_EXECUTIONS.STATUS.eq(3.toByte))
)
.orderBy(WORKFLOW_EXECUTIONS.STARTING_TIME.desc())
.limit(1)
OPERATOR_EXECUTIONS.WORKFLOW_EXECUTION_ID.eq(
context
.select(WORKFLOW_EXECUTIONS.EID)
.from(WORKFLOW_EXECUTIONS)
.join(WORKFLOW_VERSION)
.on(WORKFLOW_VERSION.VID.eq(WORKFLOW_EXECUTIONS.VID))
.where(
WORKFLOW_VERSION.WID
.eq(widAsUInteger)
.and(WORKFLOW_EXECUTIONS.STATUS.eq(3.toByte))
)
)
.orderBy(WORKFLOW_EXECUTIONS.STARTING_TIME.desc())
.limit(1)
)
)
.orderBy(WORKFLOW_RUNTIME_STATISTICS.TIME, WORKFLOW_RUNTIME_STATISTICS.OPERATOR_ID)
.fetchInto(classOf[WorkflowRuntimeStatistics])
.asScala
.toList
if (rawStats.isEmpty) {
None
} else {
val cumulatedStats = rawStats.foldLeft(Map.empty[String, Double]) { (acc, stat) =>
val opTotalExecutionTime = acc.getOrElse(stat.getOperatorId, 0.0)
acc + (stat.getOperatorId -> (opTotalExecutionTime + (stat.getDataProcessingTime
.doubleValue() + stat.getControlProcessingTime.doubleValue()) / 1e9))
val opTotalExecutionTime = acc.getOrElse(stat.operatorId, 0.0)
acc + (stat.operatorId -> (opTotalExecutionTime + (stat.dataProcessingTime
.doubleValue() + stat.controlProcessingTime.doubleValue()) / 1e9))
}
Some(cumulatedStats)
}
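
A side note on the aggregation at the end of this hunk: the foldLeft sums each operator's data-processing and control-processing time (stored in nanoseconds) into a per-operator total in seconds. The following is an illustrative, self-contained sketch of that step, not part of the commit; OperatorStat is a hypothetical stand-in for the fetched statistics row.

object CostAggregationSketch {
  // Hypothetical stand-in for one fetched statistics row; field names mirror the query above.
  final case class OperatorStat(
      operatorId: String,
      dataProcessingTime: Long, // nanoseconds
      controlProcessingTime: Long // nanoseconds
  )

  // Same shape as the foldLeft in the hunk: accumulate seconds per operator id.
  def cumulateExecutionTime(rawStats: List[OperatorStat]): Map[String, Double] =
    rawStats.foldLeft(Map.empty[String, Double]) { (acc, stat) =>
      val opTotal = acc.getOrElse(stat.operatorId, 0.0)
      acc + (stat.operatorId ->
        (opTotal + (stat.dataProcessingTime.toDouble + stat.controlProcessingTime.toDouble) / 1e9))
    }

  def main(args: Array[String]): Unit = {
    // Two partial executions of "op-1": (2.0 + 0.5) s + (1.0 + 0.0) s = 3.5 s in total.
    val stats = List(
      OperatorStat("op-1", 2000000000L, 500000000L),
      OperatorStat("op-1", 1000000000L, 0L)
    )
    println(cumulateExecutionTime(stats)) // Map(op-1 -> 3.5)
  }
}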

@@ -179,7 +179,7 @@ class ComputingUnitMaster extends io.dropwizard.Application[Configuration] with
val storageType = collection.get("storageType").asText()
val collectionName = collection.get("storageKey").asText()
storageType match {
case OpResultStorage.MEMORY | OpResultStorage.ICEBERG =>
case OpResultStorage.ICEBERG =>
// rely on the server-side result cleanup logic.
case OpResultStorage.MONGODB =>
MongoDatabaseManager.dropCollection(collectionName)

@@ -23,7 +23,6 @@ import edu.uci.ics.texera.web.resource.dashboard.user.dataset.{
DatasetAccessResource,
DatasetResource
}
import edu.uci.ics.texera.web.resource.dashboard.user.discussion.UserDiscussionResource
import edu.uci.ics.texera.web.resource.dashboard.user.project.{
ProjectAccessResource,
ProjectResource,

@@ -147,7 +146,6 @@ class TexeraWebApplication
environment.jersey.register(classOf[GmailResource])
environment.jersey.register(classOf[AdminExecutionResource])
environment.jersey.register(classOf[UserQuotaResource])
environment.jersey.register(classOf[UserDiscussionResource])
environment.jersey.register(classOf[AIAssistantResource])
}
}

@@ -118,7 +118,7 @@ object DatasetSearchQueryBuilder extends SearchQueryBuilder {
),
dataset.getOwnerUid == uid,
List(),
DatasetResource.calculateLatestDatasetVersionSize(dataset.getDid)
DatasetResource.calculateDatasetVersionSize(dataset.getDid)
)
DashboardClickableFileEntry(
resourceType = SearchQueryBuilder.DATASET_RESOURCE_TYPE,

@@ -14,7 +14,8 @@ import edu.uci.ics.texera.web.resource.dashboard.user.dataset.DatasetAccessResou
context,
getOwner
}
import org.jooq.DSLContext
import org.jooq.{Condition, DSLContext}
import org.jooq.impl.DSL
import org.jooq.types.UInteger

import java.util

@@ -28,36 +29,28 @@ object DatasetAccessResource {
.createDSLContext()

def userHasReadAccess(ctx: DSLContext, did: UInteger, uid: UInteger): Boolean = {
val datasetDao = new DatasetDao(ctx.configuration())
val isDatasetPublic = Option(datasetDao.fetchOneByDid(did))
.flatMap(dataset => Option(dataset.getIsPublic))
.contains(1.toByte)

isDatasetPublic ||
userHasWriteAccess(ctx, did, uid) ||
datasetIsPublic(ctx, did) ||
getDatasetUserAccessPrivilege(ctx, did, uid) == DatasetUserAccessPrivilege.READ
}

def userOwnDataset(ctx: DSLContext, did: UInteger, uid: UInteger): Boolean = {
ctx
.selectOne()
.from(DATASET)
.where(DATASET.DID.eq(did))
.and(DATASET.OWNER_UID.eq(uid))
.fetch()
.isNotEmpty
val datasetDao = new DatasetDao(ctx.configuration())

Option(datasetDao.fetchOneByDid(did))
.exists(_.getOwnerUid == uid)
}

def userHasWriteAccess(ctx: DSLContext, did: UInteger, uid: UInteger): Boolean = {
userOwnDataset(ctx, did, uid) ||
getDatasetUserAccessPrivilege(ctx, did, uid) == DatasetUserAccessPrivilege.WRITE
}

def datasetIsPublic(ctx: DSLContext, did: UInteger): Boolean = {
Option(
ctx
.select(DATASET.IS_PUBLIC)
.from(DATASET)
.where(DATASET.DID.eq(did))
.fetchOneInto(classOf[Boolean])
).getOrElse(false)
}

def getDatasetUserAccessPrivilege(
ctx: DSLContext,
did: UInteger,

@@ -67,21 +60,23 @@
ctx
.select(DATASET_USER_ACCESS.PRIVILEGE)
.from(DATASET_USER_ACCESS)
.where(DATASET_USER_ACCESS.DID.eq(did))
.and(DATASET_USER_ACCESS.UID.eq(uid))
.fetchOne()
)
.map(_.getValue(DATASET_USER_ACCESS.PRIVILEGE))
.getOrElse(DatasetUserAccessPrivilege.NONE)
.where(
DATASET_USER_ACCESS.DID
.eq(did)
.and(DATASET_USER_ACCESS.UID.eq(uid))
)
.fetchOneInto(classOf[DatasetUserAccessPrivilege])
).getOrElse(DatasetUserAccessPrivilege.NONE)
}

def getOwner(ctx: DSLContext, did: UInteger): User = {
val ownerUid = ctx
.select(DATASET.OWNER_UID)
.from(DATASET)
.where(DATASET.DID.eq(did))
.fetchOneInto(classOf[UInteger])
new UserDao(ctx.configuration()).fetchOneByUid(ownerUid)
val datasetDao = new DatasetDao(ctx.configuration())
val userDao = new UserDao(ctx.configuration())

Option(datasetDao.fetchOneByDid(did))
.flatMap(dataset => Option(dataset.getOwnerUid))
.map(ownerUid => userDao.fetchOneByUid(ownerUid))
.orNull
}
}

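
To summarize the refactored access checks in this file: read access holds if the dataset is public, the user has write access, or the user has an explicit READ grant; write access holds if the user owns the dataset or has an explicit WRITE grant. Below is a minimal sketch of that privilege lattice over plain values, not part of the commit; Dataset and Privilege here are simplified stand-ins for the generated pojos and DatasetUserAccessPrivilege.

object DatasetAccessSketch {
  sealed trait Privilege
  case object NoAccess extends Privilege
  case object Read extends Privilege
  case object Write extends Privilege

  // isPublic mirrors the byte flag checked in userHasReadAccess (1 = public).
  final case class Dataset(did: Int, ownerUid: Int, isPublic: Byte)

  def userOwnsDataset(dataset: Dataset, uid: Int): Boolean =
    dataset.ownerUid == uid

  def userHasWriteAccess(dataset: Dataset, uid: Int, granted: Privilege): Boolean =
    userOwnsDataset(dataset, uid) || granted == Write

  def userHasReadAccess(dataset: Dataset, uid: Int, granted: Privilege): Boolean =
    dataset.isPublic == 1.toByte ||
      userHasWriteAccess(dataset, uid, granted) ||
      granted == Read

  def main(args: Array[String]): Unit = {
    val ds = Dataset(did = 7, ownerUid = 42, isPublic = 0.toByte)
    println(userHasReadAccess(ds, uid = 99, granted = Read))      // true: explicit READ grant
    println(userHasWriteAccess(ds, uid = 99, granted = Read))     // false: not owner, no WRITE grant
    println(userHasWriteAccess(ds, uid = 42, granted = NoAccess)) // true: owner
  }
}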