@@ -9,7 +9,6 @@ import com.microsoft.azure.synapse.ml.core.test.fuzzing.{TestObject, Transformer
import com.microsoft.azure.synapse.ml.services.openai._
import org.apache.spark.ml.util.MLReadable
import org.apache.spark.sql.{DataFrame, Row}
import org.scalactic.Equality

trait AIFoundryAPIKey {
lazy val aiFoundryAPIKey: String = sys.env.getOrElse("AI_FOUNDRY_API_KEY", Secrets.AIFoundryApiKey)
@@ -18,6 +17,8 @@ trait AIFoundryAPIKey {
}

class AIFoundryChatCompletionSuite extends TransformerFuzzing[AIFoundryChatCompletion] with AIFoundryAPIKey with Flaky {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -211,10 +212,6 @@ class AIFoundryChatCompletionSuite extends TransformerFuzzing[AIFoundryChatCompl
assert(c.message.content.length > requiredLength)))
}

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
super.assertDFEq(df1.drop("out"), df2.drop("out"))(eq)
}

override def testObjects(): Seq[TestObject[AIFoundryChatCompletion]] =
Seq(new TestObject(completion, goodDf))
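The same edit repeats across every suite touched by this PR: the scalactic `Equality` import and the per-suite `assertDFEq` overrides (which projected away the nondeterministic service output before comparing) are removed, and each suite instead sets `compareDataInSerializationTest` to `false`. As a rough illustration, the sketch below shows how a serialization round-trip test might honor such a flag; apart from `compareDataInSerializationTest` and `assertDFEq`, the trait and method names are assumptions for illustration, not the actual SynapseML fuzzing API.

```scala
// Hypothetical sketch only: names other than compareDataInSerializationTest
// and assertDFEq are illustrative assumptions, not the SynapseML API.
import org.apache.spark.ml.Transformer
import org.apache.spark.sql.DataFrame

trait SerializationFuzzingSketch {
  // Suites override this to false when the service output is nondeterministic,
  // so the save/load round trip only checks the schema instead of row contents.
  val compareDataInSerializationTest: Boolean = true

  def assertDFEq(df1: DataFrame, df2: DataFrame): Unit = {
    assert(df1.schema == df2.schema)
    assert(df1.collect().sameElements(df2.collect()))
  }

  def testRoundTrip(original: Transformer, reloaded: Transformer, df: DataFrame): Unit = {
    val before = original.transform(df)
    val after = reloaded.transform(df)
    assert(before.schema == after.schema)
    if (compareDataInSerializationTest) {
      // Only compare row contents when the transformer output is deterministic.
      assertDFEq(before, after)
    }
  }
}
```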

@@ -69,6 +69,8 @@ trait AnomalyDetectorSuiteBase extends TestBase with AnomalyKey {
}

class DetectLastAnomalySuite extends TransformerFuzzing[DetectLastAnomaly] with AnomalyDetectorSuiteBase {
override val compareDataInSerializationTest: Boolean = false


lazy val ad: DetectLastAnomaly = new DetectLastAnomaly()
.setSubscriptionKey(anomalyKey)
@@ -134,6 +136,8 @@ class DetectLastAnomalySuite extends TransformerFuzzing[DetectLastAnomaly] with
}

class DetectAnomaliesSuite extends TransformerFuzzing[DetectAnomalies] with AnomalyDetectorSuiteBase {
override val compareDataInSerializationTest: Boolean = false


lazy val ad: DetectAnomalies = new DetectAnomalies()
.setSubscriptionKey(anomalyKey)
@@ -172,6 +176,7 @@ class DetectAnomaliesSuite extends TransformerFuzzing[DetectAnomalies] with Anom

class SimpleDetectAnomaliesSuite extends TransformerFuzzing[SimpleDetectAnomalies]
with AnomalyDetectorSuiteBase {
override val compareDataInSerializationTest: Boolean = false

lazy val baseSeq = Seq(
("1972-01-01T00:00:00Z", 826.0),
@@ -6,16 +6,17 @@ package com.microsoft.azure.synapse.ml.services.face
import com.microsoft.azure.synapse.ml.services._
import com.microsoft.azure.synapse.ml.core.test.fuzzing.{TestObject, TransformerFuzzing}
import org.apache.spark.ml.util.MLReadable
import org.apache.spark.sql.functions.{col, explode, lit}
import org.apache.spark.sql.functions.{col, lit}
import org.apache.spark.sql.{DataFrame, Row}
import org.scalactic.Equality

import java.time.LocalDateTime
import java.time.format.{DateTimeFormatterBuilder, DateTimeParseException, SignStyle}
import java.time.temporal.ChronoField
import scala.util.matching.Regex

class DetectFaceSuite extends TransformerFuzzing[DetectFace] with CognitiveKey {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -32,11 +33,6 @@ class DetectFaceSuite extends TransformerFuzzing[DetectFace] with CognitiveKey {
.setReturnFaceLandmarks(true)
.setReturnFaceAttributes(Seq("exposure"))

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = df.select(explode(col("face"))).select("col.*").drop("faceId")
super.assertDFEq(prep(df1), prep(df2))(eq)
}

test("Basic Usage") {
face.transform(df)
val results = face.transform(df)
@@ -55,6 +51,8 @@ class DetectFaceSuite extends TransformerFuzzing[DetectFace] with CognitiveKey {
}

class FindSimilarFaceSuite extends TransformerFuzzing[FindSimilarFace] with CognitiveKey {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -117,6 +115,8 @@ class FindSimilarFaceSuite extends TransformerFuzzing[FindSimilarFace] with Cogn
}

class GroupFacesSuite extends TransformerFuzzing[GroupFaces] with CognitiveKey {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -178,6 +178,8 @@ class GroupFacesSuite extends TransformerFuzzing[GroupFaces] with CognitiveKey {
}

class IdentifyFacesSuite extends TransformerFuzzing[IdentifyFaces] with CognitiveKey {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -328,6 +330,8 @@ class IdentifyFacesSuite extends TransformerFuzzing[IdentifyFaces] with Cognitiv
}

class VerifyFacesSuite extends TransformerFuzzing[VerifyFaces] with CognitiveKey {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -8,9 +8,10 @@ import org.apache.spark.SparkException
import org.apache.spark.ml.util.MLReadable
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.{ArrayType, DoubleType, StringType, StructType}
import org.scalactic.Equality

class FormOntologyLearnerSuite extends EstimatorFuzzing[FormOntologyLearner] with FormRecognizerUtils {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -90,14 +91,6 @@ class FormOntologyLearnerSuite extends EstimatorFuzzing[FormOntologyLearner] wit
assert(newDF.select("unified_ontology.*").collect().head.getAs[Double]("TotalTax") === 67.13)
}

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = {
df.select("url", "unified_ontology.SubTotal")
}

super.assertDFEq(prep(df1), prep(df2))(eq)
}

override def testObjects(): Seq[TestObject[FormOntologyLearner]] =
Seq(new TestObject(ontologyLearner, df))

@@ -20,7 +20,6 @@ import org.apache.spark.ml.Transformer
import org.apache.spark.ml.util.MLReadable
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.col
import org.scalactic.Equality
import spray.json._

import java.net.URI
@@ -155,6 +154,8 @@ trait FormRecognizerUtils extends TestBase with CognitiveKey with Flaky with Ima
}

class AnalyzeLayoutSuite extends TransformerFuzzing[AnalyzeLayout] with FormRecognizerUtils {
override val compareDataInSerializationTest: Boolean = false


lazy val analyzeLayout: AnalyzeLayout = new AnalyzeLayout()
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
@@ -164,14 +165,6 @@ class AnalyzeLayoutSuite extends TransformerFuzzing[AnalyzeLayout] with FormReco
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
.setImageBytesCol("imageBytes").setOutputCol("layout").setConcurrency(5)

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = {
df.select("source", "layout.analyzeResult.readResults")
}

super.assertDFEq(prep(df1), prep(df2))(eq)
}

test("Basic Usage with URL") {
val results = imageDf1.mlTransform(analyzeLayout,
flattenReadResults("layout", "readlayout"),
@@ -219,6 +212,8 @@ class AnalyzeLayoutSuite extends TransformerFuzzing[AnalyzeLayout] with FormReco
}

class AnalyzeReceiptsSuite extends TransformerFuzzing[AnalyzeReceipts] with FormRecognizerUtils {
override val compareDataInSerializationTest: Boolean = false


lazy val analyzeReceipts: AnalyzeReceipts = new AnalyzeReceipts()
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
@@ -228,14 +223,6 @@ class AnalyzeReceiptsSuite extends TransformerFuzzing[AnalyzeReceipts] with Form
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
.setImageBytesCol("imageBytes").setOutputCol("receipts").setConcurrency(5)

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = {
df.select("source", "receipts.analyzeResult.readResults")
}

super.assertDFEq(prep(df1), prep(df2))(eq)
}

test("Basic Usage with URL") {
val results = imageDf2.mlTransform(analyzeReceipts,
flattenReadResults("receipts", "readReceipts"),
@@ -267,6 +254,8 @@ class AnalyzeReceiptsSuite extends TransformerFuzzing[AnalyzeReceipts] with Form
}

class AnalyzeBusinessCardsSuite extends TransformerFuzzing[AnalyzeBusinessCards] with FormRecognizerUtils {
override val compareDataInSerializationTest: Boolean = false


lazy val analyzeBusinessCards: AnalyzeBusinessCards = new AnalyzeBusinessCards()
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
@@ -276,14 +265,6 @@ class AnalyzeBusinessCardsSuite extends TransformerFuzzing[AnalyzeBusinessCards]
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
.setImageBytesCol("imageBytes").setOutputCol("businessCards").setConcurrency(5)

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = {
df.select("source", "businessCards.analyzeResult.readResults")
}

super.assertDFEq(prep(df1), prep(df2))(eq)
}

test("Basic Usage with URL") {
val results = imageDf3.mlTransform(analyzeBusinessCards,
flattenReadResults("businessCards", "readBusinessCards"),
@@ -317,6 +298,8 @@ class AnalyzeBusinessCardsSuite extends TransformerFuzzing[AnalyzeBusinessCards]
}

class AnalyzeInvoicesSuite extends TransformerFuzzing[AnalyzeInvoices] with FormRecognizerUtils {
override val compareDataInSerializationTest: Boolean = false


lazy val analyzeInvoices: AnalyzeInvoices = new AnalyzeInvoices()
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
@@ -326,14 +309,6 @@ class AnalyzeInvoicesSuite extends TransformerFuzzing[AnalyzeInvoices] with Form
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
.setImageBytesCol("imageBytes").setOutputCol("invoices").setConcurrency(5)

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = {
df.select("source", "invoices.analyzeResult.readResults")
}

super.assertDFEq(prep(df1), prep(df2))(eq)
}

test("Basic Usage with URL") {
val results = imageDf4.mlTransform(analyzeInvoices,
flattenReadResults("invoices", "readInvoices"),
@@ -377,6 +352,8 @@ class AnalyzeInvoicesSuite extends TransformerFuzzing[AnalyzeInvoices] with Form
}

class AnalyzeIDDocumentsSuite extends TransformerFuzzing[AnalyzeIDDocuments] with FormRecognizerUtils {
override val compareDataInSerializationTest: Boolean = false


lazy val analyzeIDDocuments: AnalyzeIDDocuments = new AnalyzeIDDocuments()
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
@@ -386,14 +363,6 @@ class AnalyzeIDDocumentsSuite extends TransformerFuzzing[AnalyzeIDDocuments] wit
.setSubscriptionKey(cognitiveKey).setLocation("eastus")
.setImageBytesCol("imageBytes").setOutputCol("ids").setConcurrency(5)

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = {
df.select("source", "ids.analyzeResult.readResults")
}

super.assertDFEq(prep(df1), prep(df2))(eq)
}

test("Basic Usage with URL") {
val results = imageDf5.mlTransform(analyzeIDDocuments,
flattenReadResults("ids", "readIds"),
@@ -439,6 +408,7 @@ trait CustomModelUtils extends TestBase with CognitiveKey {

class ListCustomModelsSuite extends TransformerFuzzing[ListCustomModels]
with FormRecognizerUtils with CustomModelUtils {
override val compareDataInSerializationTest: Boolean = false

lazy val listCustomModels: ListCustomModels = {
new ListCustomModels()
@@ -449,14 +419,6 @@ class ListCustomModelsSuite extends TransformerFuzzing[ListCustomModels]
.setConcurrency(5)
}

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = {
df.select("models.summary.count")
}

super.assertDFEq(prep(df1), prep(df2))(eq)
}

ignore("List model list details") {
val results = pathDf.mlTransform(listCustomModels,
flattenModelList("models", "modelIds"))
@@ -8,7 +8,6 @@ import com.microsoft.azure.synapse.ml.core.test.fuzzing.{TestObject, Transformer
import org.apache.spark.ml.util.MLReadable
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, Row}
import org.scalactic.Equality

import scala.collection.immutable.HashMap

@@ -49,14 +48,7 @@ trait FormRecognizerV3Utils extends TestBase {

class AnalyzeDocumentSuite extends TransformerFuzzing[AnalyzeDocument] with FormRecognizerUtils
with CustomModelUtils with FormRecognizerV3Utils {

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
def prep(df: DataFrame) = {
df.select("source", "result.analyzeResult.content")
}

super.assertDFEq(prep(df1), prep(df2))(eq)
}
override val compareDataInSerializationTest: Boolean = false

test("basic usage with tables") {
val fromRow = AnalyzeDocumentResponse.makeFromRowConverter
@@ -15,7 +15,6 @@ import org.apache.http.entity.StringEntity
import org.apache.spark.ml.util.MLReadable
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.col
import org.scalactic.Equality

import java.net.URI

@@ -24,6 +23,8 @@ trait AzureMapsKey {
}

class AzMapsSearchAddressSuite extends TransformerFuzzing[AddressGeocoder] with AzureMapsKey {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -71,10 +72,6 @@ class AzMapsSearchAddressSuite extends TransformerFuzzing[AddressGeocoder] with
assert(flattenedResults.toSeq.head.get(1).toString.startsWith("47.6418"))
}

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
super.assertDFEq(extractFields(df1), extractFields(df2))(eq)
}

override def testObjects(): Seq[TestObject[AddressGeocoder]] =
Seq(new TestObject[AddressGeocoder](
batchGeocodeAddresses,
@@ -84,6 +81,8 @@ class AzMapsSearchAddressSuite extends TransformerFuzzing[AddressGeocoder] with
}

class AzMapsSearchReverseAddressSuite extends TransformerFuzzing[ReverseAddressGeocoder] with AzureMapsKey {
override val compareDataInSerializationTest: Boolean = false


import spark.implicits._

@@ -148,10 +147,6 @@ class AzMapsSearchReverseAddressSuite extends TransformerFuzzing[ReverseAddressG

}

override def assertDFEq(df1: DataFrame, df2: DataFrame)(implicit eq: Equality[DataFrame]): Unit = {
super.assertDFEq(extractFields(df1), extractFields(df2))(eq)
}

override def testObjects(): Seq[TestObject[ReverseAddressGeocoder]] =
Seq(new TestObject[ReverseAddressGeocoder](
batchReverseGeocode,