'If File.Exists(paths & strFileNameOnly) Then
'lblMessage.Text = "Image of Similar name already Exist,Choose other name"
'Else
If uploadfile.PostedFile.ContentLength > 4 * 1024 * 1024 Then ' 4 MB limit, to match the message below
lblmessage.Text = "The size of the file is greater than 4 MB"
ElseIf strFileNameOnly = "" Then
Exit Sub
Else
'strfilename = uploadfile.FileName.Substring(0, (InStr(uploadfile.FileName, ".") - 1))
strFileNameOnly = fileName & ".csv"
strpath = "/sites/marketing/apps/disposition/content/excel/" '& strFileNameOnly
uploadfile.PostedFile.SaveAs(Server.MapPath(strpath) & strFileNameOnly)
'lblmessage.Text = "File Upload Success."
'Session("Img") = strFileNameOnly
Dim strConn As String = "Provider=Microsoft.Jet.OLEDB.4.0;Data Source=" & (Server.MapPath(strpath)) & ";Extended Properties=""Text;HDR=No;FMT=Delimited"""
'"Provider=Microsoft.Jet.OLEDB.4.0;" & _
'"Data Source=" & "/" & strFileNameOnly & ";" & _
'"Extended Properties=Excel 8.0;"
Dim conn As New OleDb.OleDbConnection(strConn)
Dim myData As New OleDbDataAdapter("SELECT * FROM [" & strFileNameOnly & "]", conn) ' with the Text driver, the file name acts as the table name
Dim myDatatable As New System.Data.DataTable
Dim mySqlConnection As New SqlConnection(ConfigurationManager.ConnectionStrings("BDOConnectionString").ToString())
'With the Jet Text driver, the Data Source is the folder and the file name is the table you SELECT from
myData.Fill(myDatatable)
InsertData(myDatatable, mySqlConnection)
'System.IO.File.Delete(Server.MapPath(strpath))
GridView1.DataBind()
upload.Visible = False
End If
End If
' GridView1.DataSource = myDataset.Tables(0).DefaultView
' GridView1.DataBind()
End Sub
Private Shared Sub InsertData(ByVal sourceTable As System.Data.DataTable, ByVal destConnection As SqlConnection)
' old method: Lots of INSERT statements
' first, create the insert command that we will call over and over:
destConnection.Open()
Using ins As New SqlCommand("INSERT INTO [tblAppointmentDisposition] ([contactdate], [dnbnumber], [prospectname], [businessofficer], [phonemeeting], [followupcalldate2], [phonemeetingappt], [followupcalldate3], [appointmentdate], [appointmentlocation], [appointmentkept], [applicationgenerated], [applicationgenerated2], [applicationgenerated3], [comments], [newaccount], [futureopportunity]) VALUES (@contactdate, @dnbnumber, @prospectname, @businessofficer, @phonemeeting, @followupcalldate2, @phonemeetingappt, @followupcalldate3, @appointmentdate, @appointmentlocation, @appointmentkept, @applicationgenerated, @applicationgenerated2, @applicationgenerated3, @comments, @newaccount, @futureopportunity)", destConnection)
ins.CommandType = CommandType.Text
ins.Parameters.Add("@contactdate", SqlDbType.Text)
ins.Parameters.Add("@dnbnumber", SqlDbType.Text)
ins.Parameters.Add("@prospectname", SqlDbType.Text)
ins.Parameters.Add("@businessofficer", SqlDbType.NVarChar)
ins.Parameters.Add("@phonemeeting", SqlDbType.Text)
ins.Parameters.Add("@followupcalldate2", SqlDbType.Text)
ins.Parameters.Add("@phonemeetingappt", SqlDbType.Text)
ins.Parameters.Add("@followupcalldate3", SqlDbType.Text)
ins.Parameters.Add("@appointmentdate", SqlDbType.Text)
ins.Parameters.Add("@appointmentlocation", SqlDbType.Text)
ins.Parameters.Add("@appointmentkept", SqlDbType.Text)
ins.Parameters.Add("@applicationgenerated", SqlDbType.Text)
ins.Parameters.Add("@applicationgenerated2", SqlDbType.Text)
ins.Parameters.Add("@applicationgenerated3", SqlDbType.Text)
ins.Parameters.Add("@comments", SqlDbType.Text)
ins.Parameters.Add("@newaccount", SqlDbType.Text)
ins.Parameters.Add("@futureopportunity", SqlDbType.Text)
' and now, do the work:
For Each r As DataRow In sourceTable.Rows
If sourceTable.Rows.IndexOf(r) = 0 Then
'skip the header row
Else
For i As Integer = 0 To 16
ins.Parameters(i).Value = r(i)
Next
ins.ExecuteNonQuery()
'If System.Threading.Interlocked.Increment(rowscopied) Mod 10000 = 0 Then
'Console.WriteLine("-- copied {0} rows.", rowscopied)
'End If
End If
Next
End Using
destConnection.Close()
End Sub
Just attempting to import a simple tab delimited text file into my SQL Server 2005 database using the SQL Server Import and Export wizard. Column names are specified within the first line of the file. The Header Rows to Skip field value is listed as 0, but the wizard indicates that "The field, Header rows to skip, does not contain a valid numeric value".
Why isn't zero (0) a valid numeric value? I don't want to skip any rows. Plus, I get the same error when trying to export to a text file, although the Header rows to skip field does not exist there. I can increase the number to 1 or more, but then the wizard skips part of my data, which is unacceptable.
What am I missing here? I installed SP1 of SQL Server 2005, but that did not help.
I was playing around with the new SQL 2005 CLR functionality and remembered this discussion that I had with Erland Sommarskog concerning performance of scalar UDFs some time ago (see "Calling sp_oa* in function" in this newsgroup). In that discussion, Erland made the following comment about UDFs in SQL 2005:

>> The good news is that in SQL 2005, Microsoft has addressed several of these issues, and the cost of a UDF is not as severe there. In fact for a complex expression, a UDF written in a CLR language may be faster than the corresponding expression using built-in T-SQL functions. <<

I thought I would put this to the test using some of the same SQL as before, but adding a simple scalar CLR UDF into the mix. The test involved querying a simple table with about 300,000 rows. The scenarios are as follows:

(A) Use a simple CASE function to calculate a column
(B) Use a simple CASE function to calculate a column and as a criterion in the WHERE clause
(C) Use a scalar UDF to calculate a column
(D) Use a scalar UDF to calculate a column and as a criterion in the WHERE clause
(E) Use a scalar CLR UDF to calculate a column
(F) Use a scalar CLR UDF to calculate a column and as a criterion in the WHERE clause

A sample of the results is as follows (time in milliseconds):

(295310 row(s) affected) A: 1563
(150003 row(s) affected) B: 906
(295310 row(s) affected) C: 2703
(150003 row(s) affected) D: 2533
(295310 row(s) affected) E: 2060
(150003 row(s) affected) F: 2190

The scalar CLR UDF was significantly faster than the classic scalar UDF, even for this very simple function. Perhaps a more complex function would have shown an even greater difference. Based on this, I must conclude that Erland was right. Of course, it's still faster to stick with basic built-in functions like CASE.

In another test, I decided to run some queries to compare built-in aggregates vs. a couple of simple CLR aggregates as follows:

(G) Calculate averages by group using the built-in AVG aggregate
(H) Calculate averages by group using a CLR aggregate that simulates the built-in AVG aggregate
(I) Calculate a "trimmed" average by group (average excluding highest and lowest values) using built-in aggregates
(J) Calculate a "trimmed" average by group using a CLR aggregate specially designed for this purpose

A sample of the results is as follows (time in milliseconds):

(59 row(s) affected) G: 313
(59 row(s) affected) H: 890
(59 row(s) affected) I: 216
(59 row(s) affected) J: 846

It seems that the CLR aggregates came with a significant performance penalty over the built-in aggregates. Perhaps they would pay off if I were attempting a very complex type of aggregation. However, at this point I'm going to shy away from using these unless I can't find a way to do the calculation with standard SQL.

In a way, I'm happy that basic SQL still seems to be the fastest way to get things done. With the addition of the new CLR functionality, I suspect that MS may be giving us developers enough rope to comfortably hang ourselves if we're not careful.

Bill E.
Hollywood, FL

------------------------------------------------------------------

-- table TestAssignment, about 300,000 rows
CREATE TABLE [dbo].[TestAssignment](
[TestAssignmentID] [int] NOT NULL,
[ProductID] [int] NULL,
[PercentPassed] [int] NULL,
CONSTRAINT [PK_TestAssignment] PRIMARY KEY CLUSTERED ([TestAssignmentID] ASC))

--Scalar UDF in SQL
CREATE FUNCTION [dbo].[fnIsEven](@intValue int)
RETURNS bit
AS
BEGIN
Declare @bitReturnValue bit
If @intValue % 2 = 0
Set @bitReturnValue=1
Else
Set @bitReturnValue=0
RETURN @bitReturnValue
END

--Scalar CLR UDF
/*
using System;
using System.Data;
using System.Data.SqlClient;
using System.Data.SqlTypes;
using Microsoft.SqlServer.Server;

public partial class UserDefinedFunctions
{
    [Microsoft.SqlServer.Server.SqlFunction(IsDeterministic=true, IsPrecise=true)]
    public static SqlBoolean IsEven(SqlInt32 value)
    {
        if (value % 2 == 0)
        {
            return true;
        }
        else
        {
            return false;
        }
    }
};
*/

--Test #1

--Scenario A - Query with calculated column--
SELECT TestAssignmentID,
CASE WHEN TestAssignmentID % 2=0 THEN 1 ELSE 0 END AS CalcColumn
FROM TestAssignment

--Scenario B - Query with calculated column as criterion--
SELECT TestAssignmentID,
CASE WHEN TestAssignmentID % 2=0 THEN 1 ELSE 0 END AS CalcColumn
FROM TestAssignment
WHERE CASE WHEN TestAssignmentID % 2=0 THEN 1 ELSE 0 END=1

--Scenario C - Query using scalar UDF--
SELECT TestAssignmentID,
dbo.fnIsEven(TestAssignmentID) AS CalcColumn
FROM TestAssignment

--Scenario D - Query using scalar UDF as criterion--
SELECT TestAssignmentID,
dbo.fnIsEven(TestAssignmentID) AS CalcColumn
FROM TestAssignment
WHERE dbo.fnIsEven(TestAssignmentID)=1

--Scenario E - Query using CLR scalar UDF--
SELECT TestAssignmentID,
dbo.fnIsEven_CLR(TestAssignmentID) AS CalcColumn
FROM TestAssignment

--Scenario F - Query using CLR scalar UDF as criterion--
SELECT TestAssignmentID,
dbo.fnIsEven_CLR(TestAssignmentID) AS CalcColumn
FROM TestAssignment
WHERE dbo.fnIsEven_CLR(TestAssignmentID)=1

--CLR Aggregate functions
/*
using System;
using System.Data;
using System.Data.SqlClient;
using System.Data.SqlTypes;
using Microsoft.SqlServer.Server;

[Serializable]
[Microsoft.SqlServer.Server.SqlUserDefinedAggregate(Format.Native)]
public struct Avg
{
    public void Init()
    {
        this.numValues = 0;
        this.totalValue = 0;
    }

    public void Accumulate(SqlDouble Value)
    {
        if (!Value.IsNull)
        {
            this.numValues++;
            this.totalValue += Value;
        }
    }

    public void Merge(Avg Group)
    {
        if (Group.numValues > 0)
        {
            this.numValues += Group.numValues;
            this.totalValue += Group.totalValue;
        }
    }

    public SqlDouble Terminate()
    {
        if (numValues == 0)
        {
            return SqlDouble.Null;
        }
        else
        {
            return (this.totalValue / this.numValues);
        }
    }

    // private accumulators
    private int numValues;
    private SqlDouble totalValue;
}

[Serializable]
[Microsoft.SqlServer.Server.SqlUserDefinedAggregate(Format.Native)]
public struct TrimmedAvg
{
    public void Init()
    {
        this.numValues = 0;
        this.totalValue = 0;
        this.minValue = SqlDouble.MaxValue;
        this.maxValue = SqlDouble.MinValue;
    }

    public void Accumulate(SqlDouble Value)
    {
        if (!Value.IsNull)
        {
            this.numValues++;
            this.totalValue += Value;
            if (Value < this.minValue) this.minValue = Value;
            if (Value > this.maxValue) this.maxValue = Value;
        }
    }

    public void Merge(TrimmedAvg Group)
    {
        if (Group.numValues > 0)
        {
            this.numValues += Group.numValues;
            this.totalValue += Group.totalValue;
            if (Group.minValue < this.minValue) this.minValue = Group.minValue;
            if (Group.maxValue > this.maxValue) this.maxValue = Group.maxValue;
        }
    }

    public SqlDouble Terminate()
    {
        if (this.numValues < 3)
            return SqlDouble.Null;
        else
        {
            this.numValues -= 2;
            this.totalValue -= this.minValue;
            this.totalValue -= this.maxValue;
            return (this.totalValue / this.numValues);
        }
    }

    // private accumulators
    private int numValues;
    private SqlDouble totalValue;
    private SqlDouble minValue;
    private SqlDouble maxValue;
}
*/

--Test #2

--Scenario G - Average Query using built-in aggregate--
SELECT ProductID, Avg(Cast(PercentPassed AS float))
FROM TestAssignment
GROUP BY ProductID
ORDER BY ProductID

--Scenario H - Average Query using CLR aggregate--
SELECT ProductID, dbo.Avg_CLR(Cast(PercentPassed AS float)) AS Average
FROM TestAssignment
GROUP BY ProductID
ORDER BY ProductID

--Scenario I - Trimmed Average Query using built-in aggregates/set operations--
SELECT A.ProductID,
Case
When B.CountValues<3 Then Null
Else Cast(A.Total-B.MaxValue-B.MinValue AS float)/Cast(B.CountValues-2 As float)
End AS Average
FROM
(SELECT ProductID, Sum(PercentPassed) AS Total
FROM TestAssignment
GROUP BY ProductID) A
LEFT JOIN
(SELECT ProductID,
Max(PercentPassed) AS MaxValue,
Min(PercentPassed) AS MinValue,
Count(*) AS CountValues
FROM TestAssignment
WHERE PercentPassed Is Not Null
GROUP BY ProductID) B
ON A.ProductID=B.ProductID
ORDER BY A.ProductID

--Scenario J - Trimmed Average Query using CLR aggregate--
SELECT ProductID, dbo.TrimmedAvg_CLR(Cast(PercentPassed AS real)) AS Average
FROM TestAssignment
GROUP BY ProductID
ORDER BY ProductID
I have one table that contains a column named ID Number and a column named Date. I have a Do While statement that runs a SQL select statement a few times, based on the number of records with the same ID Number. During the Do While statement the information is copied into another table and deleted from the old table. After I look at the results, I see that on the second pass of the Do While loop the data was not selected and the Select statement did not run... so the old value of varValue is used again... Any reason why?
Here is a code snippet of what is going on:

Do While varCount < varRecordCount
    conSqlConnect.Open()
    cmdSelect = New SqlCommand("Select * From temp_records_1 Where [id number]=@idnumber And date<@date", conSqlConnect)
    cmdSelect.Parameters.Add("@accountnumber", "10000")
    cmdSelect.Parameters.Add("@date", dtnow)
    dtrdatareader = cmdSelect.ExecuteReader()
    While dtrdatareader.Read()
        If IsDbNull(dtrdatareader("value")) = False Then
            varValue = dtrdatareader("value")
        End If
    End While
    dtrdatareader.Close()
    conSqlConnect.Close()
    '##### The information above is copied to another table here
    '##### The record where the information was retrieved is deleted
Loop
I submitted an update query on a table of 80 million rows over the weekend. When I returned on Monday, the transaction was still running. I thought something had gone wrong and cancelled the transaction. It was taking a long time to roll back, so I recycled SQL Server assuming it would recover faster. Now I realise that it is going to take a lot of time either way, and SQL Server will not come up until the database is completely recovered. Can anybody suggest anything to speed up this process, or a way to skip it? I don't know how long it will take to roll back a transaction that ran for more than 70 hours.
I was attempting to use BCP today via xp_cmdshell. I have never done anything with BCP before, so it was very enlightening. However, I ran across a problem that maybe someone could help explain to me a little more.
I am using the "queryout" option, and when I run it, the error I get is that you "can't skip fields except for on inserts" or something like that.
The reason I was trying to use bcp is the ability to dynamically generate a filename, i.e. filename = 04182004 (the date), because in the file name argument I can use a variable. Make sense?
Since I apparently can't ignore fields, I am thinking of taking all of the information I need daily out into a separate table; then I can use xp_cmdshell to run a bcp that creates a file with the date as a filename, and I won't be ignoring any fields because I have just put the information I need in the new table. Am I making sense? Does this sound like an appropriate thing to do?
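That staging-table-plus-bcp approach is workable. For the dynamic file name part, here is a minimal sketch (the database, table, and export path are placeholders, and -T assumes a trusted connection):

DECLARE @fname varchar(200), @cmd varchar(500)

-- e.g. 04182004.txt, built from today's date in MMDDYYYY form
SET @fname = 'C:\exports\'
    + REPLACE(CONVERT(varchar(10), GETDATE(), 101), '/', '') + '.txt'

SET @cmd = 'bcp MyDB.dbo.DailyExtract out "' + @fname + '" -c -T'
EXEC master..xp_cmdshell @cmd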
I am trying to execute a stored proc using an Execute SQL Task.
I am well aware of any errors that occur, and I have handled them sufficiently.
All I want is that, whenever any errors occur in the stored proc, this Execute SQL Task should not fail and the control flow should move on to the next task.
There is an option in SSIS to skip one or more header rows, but there isn't anything to skip one or more footer rows.
Example:
header bla bla
1;"Joe";24;"New York"
2;"John";54;"Washington"
3;"Phil";36;"San Francisco"
footer bla bla
I skip the first record in the source definition, so I have 4 records left. How do I skip the fourth (last) record? The value contains some statistics, so I can't look for a special value. Is there a way to skip the last record with a script component?
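One approach (a minimal sketch, assuming an asynchronous Script Component whose input column is named Line and whose output is the default Output0Buffer - adjust to your real column metadata) is to delay every row by one: each incoming row is cached and the previous one emitted, so the final footer row is never written:

Public Class ScriptMain
    Inherits UserComponent

    ' cache for the "delay by one row" trick
    Private havePrevious As Boolean = False
    Private prevLine As String

    Public Overrides Sub Input0_ProcessInputRow(ByVal Row As Input0Buffer)
        ' Emit the row cached on the previous call; the last row of the
        ' file is cached but never emitted, which drops the footer.
        If havePrevious Then
            Output0Buffer.AddRow()
            Output0Buffer.Line = prevLine
        End If
        prevLine = Row.Line
        havePrevious = True
    End Sub
End Class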
Hello, I'm using a SqlDataReader to read my data, and every time I loop through the reader it starts from the second row. Why is that? Here is my code:

while (reader.Read())
{
    hinfo.Name = reader["_name"].ToString();
    hi.Add(hinfo);
}

I look at the database and I have two rows, but it's reading only the second row, skipping the first row.
Hi, I have the code below. I need to skip the first row in the datatable as it has the headers. This works now, but my gridview gets the header row inserted as a record.

(The InsertData routine is identical to the one listed above, apart from the row loop, which has no header-row check:)

For Each r As DataRow In sourceTable.Rows
    For i As Integer = 0 To 16
        ins.Parameters(i).Value = r(i)
    Next
    ins.ExecuteNonQuery()
Next
I am looking for best practice when passing a parameter to a stored procedure that is not needed. For example, sometimes the users will want the list filtered by a certain state; other times the users want all states. How can I make the SP ignore the WHERE clause if users want all states?
CREATE PROCEDURE usp_Example
    @State nvarchar(2)
AS
SELECT FirstName, LastName, State
FROM SomeTable
WHERE State = @State;
GO
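One common pattern (a sketch, assuming NULL is used to mean "all states"): give the parameter a NULL default and let the WHERE clause short-circuit on it:

CREATE PROCEDURE usp_Example
    @State nvarchar(2) = NULL
AS
SELECT FirstName, LastName, State
FROM SomeTable
WHERE @State IS NULL OR State = @State;  -- NULL parameter matches every row
GO

-- EXEC usp_Example                 -- all states
-- EXEC usp_Example @State = 'FL'   -- one state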
Hello All,
Does the BCP utility enable you to selectively import rows from a flat file to a table? For example: the first column in my flat file contains a record type - 1, 2..7. I only need to import types 1, 2, & 3. Can this be specified in the .fmt file?
Thanks in advance
hharry
Say you have a database with huge tables and transactional replication (push subscriptions).
Now my question:
1. if I have to initialize a snapshot but I would like to do it without the snapshot agent, what methods are available?
2. Usually the distribution agent will request an initial snapshot. How can I tell it that I would like to use an alternative method and that the distribution agent should NOT request a snapshot?
3. Any suggestions about a good practice for materializing huge tables without using the distribution agent (e.g. "switch off" replication, bcp the table out of the primary site and bcp it into the target site, "start" the distribution agent so that it doesn't request a snapshot)?
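For what it's worth, a sketch of the usual no-snapshot initialization (run at the Publisher on the publication database, assuming the data has already been copied to the subscriber, e.g. via bcp; all names are placeholders): create the subscription with @sync_type = 'replication support only' so no snapshot is generated or requested:

EXEC sp_addsubscription
    @publication = N'MyPublication',
    @subscriber = N'SubscriberServer',
    @destination_db = N'SubscriberDB',
    @subscription_type = N'push',
    @sync_type = N'replication support only';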
I have an Excel workbook with many sheets; in each sheet the first row has to be skipped, the second row contains the column information, and thereafter are the records.
The Excel Source in SSIS just gives an option: check if the first row has column names.
But the first row for me is junk - a link to the parent or first sheet - and has to be skipped; the second row has the column info.
How can this be accomplished .... any suggestions would be of great help!!!
Sample:
Main
id   desc     price   date
1    apple    1.0     1/1/1900
2    banana   2.0     1/1/2000
Main in the first row is actually a hyperlink ... once we click this it takes us to the first sheet in the workbook which has all sheet names as contents.
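One workaround (a sketch, assuming a sheet named Sheet1 with data in columns A through D; adjust the range per sheet) is to switch the Excel Source to SQL command mode and query an explicit range that starts at row 2, so the junk first row never enters the pipeline:

SELECT * FROM [Sheet1$A2:D]

With "First row has column names" checked on the connection manager, the first row of the range - row 2 of the sheet - is treated as the header row.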
Is there a way to split at this point and put the 12 rows in a different location? The task is twofold - I don't need these control rows in my data, and I need the value of "records" to verify the loaded number of rows.
UPDATED: After some testing I found out that the Flat File source does not see that footer at all. This is good and bad - I do want to load this metadata into some other tables.
I have a script that creates and populates several tables. However, I only want this to occur if one table has a row count greater than zero. I'm trying to use GOTO to jump to the end of the script. However, I get the message "A GOTO statement references the label 'MYLABEL' but the label has not been declared." How can I do this?
I have something similar to the following in my script:

IF (SELECT COUNT(*) FROM MYTABLE) = 0
BEGIN
    PRINT 'NO ROWS FOUND'
    GOTO MYLABEL
END
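That error usually just means the label itself was never written. A minimal sketch (note that GOTO can only jump to a label in the same batch, so there must be no GO between the GOTO and the label):

IF (SELECT COUNT(*) FROM MYTABLE) = 0
BEGIN
    PRINT 'NO ROWS FOUND'
    GOTO MYLABEL
END

-- ... create and populate the other tables here ...

MYLABEL:   -- a label is declared by writing its name followed by a colon
PRINT 'Done.'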
Hi All,
I have this data file with fixed-length records (see below). I am able to insert it into the database using bcp, but now I want to skip (not insert) the rows which start with the letter 'S'. Is there a way to do it? By the way, I am using the -F2 option to skip the first record. Here is my data:

Record 1 04XXX
2 13106900240120042003040045061 Testing N POLYDOROS TRUSTEEE
2 12621241640280041004040045633 What are they MARTIN &XXXXX
S C1000003200400409850000059611000000500001000000001 9613000000576497500
S X1000003200000209850000059613000000000000000000001 9613000000573497000

Thanks for your help.
Ted Lee
I am reading in a delimited file. In the Script Transformation Editor, if the UPC does not pass the checksum test, I want to throw the row out right then. I am not sure how to do that... but it is probably really simple. Thanks, Linda
Here is my script:
' Microsoft SQL Server Integration Services user script component
' This is your new script component in Microsoft Visual Basic .NET
' ScriptMain is the entrypoint class for script components
'Option Strict Off
Imports System
Imports System.Data
Imports System.Math
Imports Microsoft.SqlServer.Dts.Pipeline.Wrapper
Imports Microsoft.SqlServer.Dts.Runtime.Wrapper
Public Class ScriptMain
Inherits UserComponent
Private Function DoubleTest(ByVal Value As String) As Boolean
    Dim d As Double
    If Not Double.TryParse(Value, d) Then
        'Windows.Forms.MessageBox.Show(Value + " is not numeric")
        Return False
    End If
    Return True
End Function

End Class
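For the actual "throw the row out" part, one pattern is a sketch like the following - it assumes a synchronous output whose ExclusionGroup property is set to 1, so rows only flow through when explicitly directed, and the UPC column name plus the PassesChecksumTest helper are hypothetical:

Public Overrides Sub Input0_ProcessInputRow(ByVal Row As Input0Buffer)
    ' Only pass rows whose UPC survives the checksum test;
    ' rows that are never directed anywhere are simply dropped.
    If PassesChecksumTest(Row.UPC) Then
        Row.DirectRowToOutput0()
    End If
End Sub

Private Function PassesChecksumTest(ByVal upc As String) As Boolean
    ' hypothetical helper - put the real UPC check-digit math here
    Return DoubleTest(upc)
End Function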
In the Flat File Source properties window there is a Preview node; when we check that node there is an option to skip a number of data rows. Does it affect the result?
I have this particular problem with the unpivot. Below is my flat file source. The dates can go up to 130 columns, and this count can also vary. SM, SR, SB are values repeating for different instruments - they are the values of the instrument on the particular dates. This is a snapshot of one feed; other feeds may have differing dates. How do I read this file?
Problem 1: If I skip the first row and unpivot the 2nd row, then with a new feed with new dates my SSIS package will bomb, as it will not find the column names.
Problem 2: If I uncheck "Use first row as column headers" then problem 1 is solved, but the output will be
20080101
20061102
20061103 1.2
1.3
1.2.
1.5
.....and so on..
Is there any other way to fix this? These are feeds with the spread values of instruments on particular dates. Please help.
RUN 2.01E+11 132238 0 45
INSTRID DATATYPES 20081101 20061102 20061103
Z03369 SM 1.1 1.2 1.3
Z03369 SB 1.3 1.3 1.7
Is it possible to write a SQL statement that skips alphanumeric values? I have a field containing these values: 20, 70, 150, 140, 100, KORT, 90, 180, 160. I'm trying to check if any value is bigger than 175 (@Limit), but I want to skip the value 'KORT'. So is it possible to check whether a value is numeric or not?

ISNULL(CONVERT(int, ProductVariant.Size), 0) > @Limit

Regards, Sigurd
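One way (a sketch, assuming every non-'KORT' value is a plain integer as in the sample; note that ISNUMERIC is a pragmatic filter - it also accepts a few odd strings such as '$' or '1e3' that CONVERT(int, ...) would still reject):

SELECT *
FROM ProductVariant
WHERE CASE WHEN ISNUMERIC(Size) = 1
           THEN CONVERT(int, Size)
           ELSE 0                    -- non-numeric values like 'KORT' fail the test
      END > @Limit;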
I'm trying to optimize a few batch import procedures we use in our processes.
It currently works like this:
1) Cursor loop cycles through all data to be imported from IMPORT table
2) For every record there is an attempted insert into the PROD table inside a TRY-CATCH check, to see whether the record would pass all the primary key and foreign key constraints of the PROD table
3) Only those that pass the TRY-CATCH check gets imported into PROD table
4) Every row gets logged into a separate LOG table, either with a comment like "Import OK" or "Error: foreign key violation in field 'my_id'"
The thing is, the procedure runs fine when I'm importing several thousands of records, but when it comes to hundreds of thousands, the speed becomes an issue, as I currently get 20 records per second and slowing...
There is no other code in that procedure, no queries. Just the Cursor cycle and the try-catch check.
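A set-based rewrite is usually the cure for this kind of row-by-row slowdown. A sketch of the idea (all table and column names here are made up - a single key column id and a foreign key my_id - so adapt it to the real schema): validate all rows with joins, log the outcome per row, then insert the survivors in one statement:

-- classify every staged row once, using joins instead of a TRY-CATCH per row
INSERT INTO IMPORT_LOG (id, comment)
SELECT i.id,
       CASE WHEN p.id IS NOT NULL THEN 'Error: primary key violation'
            WHEN f.my_id IS NULL  THEN 'Error: foreign key violation in field ''my_id'''
            ELSE 'Import OK'
       END
FROM IMPORT AS i
LEFT JOIN PROD    AS p ON p.id = i.id        -- existing key means a PK violation
LEFT JOIN REF_TBL AS f ON f.my_id = i.my_id; -- missing parent means an FK violation

-- insert only the rows that passed both checks, in a single statement
INSERT INTO PROD (id, my_id, payload)
SELECT i.id, i.my_id, i.payload
FROM IMPORT AS i
LEFT JOIN REF_TBL AS f ON f.my_id = i.my_id
WHERE f.my_id IS NOT NULL
  AND NOT EXISTS (SELECT 1 FROM PROD AS p WHERE p.id = i.id);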
Hello, I have the following code to iterate through each view in a SQL Server database and call the sp_refreshview command against it. It works great until it finds a view that is damaged, or otherwise cannot be refreshed. Then the whole routine stops working.
Can someone please help me re-write this code so that any views that fail the sp_refreshview command get skipped? I'm sure it's just a matter of putting some basic error trapping into the loop, but I've had a few goes at it and failed.
Many thanks.

DECLARE @DatabaseObject varchar(255)

DECLARE ObjectCursor CURSOR
FOR SELECT table_name FROM information_schema.tables WHERE table_type = 'view'

OPEN ObjectCursor
FETCH NEXT FROM ObjectCursor INTO @DatabaseObject

WHILE @@FETCH_STATUS = 0
BEGIN
    EXEC sp_refreshview @DatabaseObject
    PRINT @DatabaseObject + ' was successfully refreshed.'
    FETCH NEXT FROM ObjectCursor INTO @DatabaseObject
END

CLOSE ObjectCursor
DEALLOCATE ObjectCursor
GO
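On SQL Server 2005 and later, wrapping the call in TRY/CATCH is the simplest fix - a sketch of just the loop body:

WHILE @@FETCH_STATUS = 0
BEGIN
    BEGIN TRY
        EXEC sp_refreshview @DatabaseObject
        PRINT @DatabaseObject + ' was successfully refreshed.'
    END TRY
    BEGIN CATCH
        -- a broken view lands here instead of killing the routine
        PRINT @DatabaseObject + ' could not be refreshed: ' + ERROR_MESSAGE()
    END CATCH
    FETCH NEXT FROM ObjectCursor INTO @DatabaseObject
END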
My replication application needs to be able to skip the logs remaining from the last stop (that is, to skip the logs generated since the last time my replication application stopped). How can I achieve this? Thanks.
I have a .xlsx file in which the data starts at the 3rd row. Using SSIS I am converting the .xlsx file into a .csv file. I am able to convert it, but in the .csv file the data is populated starting from the first row. I want the data to start at the 3rd row, as in the source.
I have a text field in my table. My sample data looks like: <<some status>> <<3/9/2008 10:00:45 PM>> <<personname>>. I'm interested in searching for <<some status>> and <<personname>> together, skipping the date in between, so my query result set should return the status and the same person name I'm looking for.
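A sketch of one way to do it with LIKE (MyTable and StatusText are placeholder names; the % wildcard swallows the date segment in between):

SELECT *
FROM MyTable
WHERE StatusText LIKE '<<some status>>%<<personname>>%';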
I can skip the first line with the Transform Data Task, but it looks like I cannot skip the first line with BULK INSERT. BULK INSERT is faster, though - can anybody help?
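For what it's worth, BULK INSERT does take a FIRSTROW option that skips leading lines - a sketch with placeholder table name, path, and delimiters:

BULK INSERT dbo.MyTable
FROM 'C:\data\myfile.txt'
WITH (
    FIRSTROW = 2,              -- start at the second line of the file
    FIELDTERMINATOR = '\t',
    ROWTERMINATOR = '\n'
);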