diff --git a/src/main/scala/com/databricks/spark/avro/SchemaConverters.scala b/src/main/scala/com/databricks/spark/avro/SchemaConverters.scala
index 7f8e20f4..debcc2d9 100644
--- a/src/main/scala/com/databricks/spark/avro/SchemaConverters.scala
+++ b/src/main/scala/com/databricks/spark/avro/SchemaConverters.scala
@@ -52,6 +52,7 @@ object SchemaConverters {
       case LONG => SchemaType(LongType, nullable = false)
       case FIXED => SchemaType(BinaryType, nullable = false)
       case ENUM => SchemaType(StringType, nullable = false)
+      case NULL => SchemaType(NullType, nullable = false)
 
       case RECORD =>
         val fields = avroSchema.getFields.asScala.map { f =>
@@ -150,7 +151,7 @@ object SchemaConverters {
         (item: AnyRef) => if (item == null) null else item.toString
       // Byte arrays are reused by avro, so we have to make a copy of them.
       case (IntegerType, INT) | (BooleanType, BOOLEAN) | (DoubleType, DOUBLE) |
-           (FloatType, FLOAT) | (LongType, LONG) =>
+           (FloatType, FLOAT) | (LongType, LONG) | (NullType, NULL) | (NullType, STRING) =>
         identity
       case (BinaryType, FIXED) =>
         (item: AnyRef) =>
diff --git a/src/test/scala/com/databricks/spark/avro/AvroSuite.scala b/src/test/scala/com/databricks/spark/avro/AvroSuite.scala
index 4843ad46..efc35eef 100644
--- a/src/test/scala/com/databricks/spark/avro/AvroSuite.scala
+++ b/src/test/scala/com/databricks/spark/avro/AvroSuite.scala
@@ -109,9 +109,8 @@ class AvroSuite extends FunSuite with BeforeAndAfterAll {
       dataFileWriter.flush()
       dataFileWriter.close()
 
-      intercept[IncompatibleSchemaException] {
-        spark.read.avro(s"$dir.avro")
-      }
+      val df = spark.read.avro(s"$dir.avro")
+      assert(df.count() == 1)
     }
   }
 